diff --git a/.mci.yml b/.mci.yml index 621a1be4f1..20a8d8e8fd 100644 --- a/.mci.yml +++ b/.mci.yml @@ -476,7 +476,8 @@ functions: export RETRYABLE_READS_UNIFIED_TESTS_PATH="$(pwd)/../data/retryable-reads/unified" export RETRYABLE_WRITES_UNIFIED_TESTS_PATH="$(pwd)/../data/retryable-writes/unified" export SESSION_UNIFIED_TESTS_PATH="$(pwd)/../data/sessions/unified" - export TRANSACTIONS_TESTS_PATH="$(pwd)/../data/transactions" + export TRANSACTIONS_LEGACY_TESTS_PATH="$(pwd)/../data/transactions/legacy" + export TRANSACTIONS_UNIFIED_TESTS_PATH="$(pwd)/../data/transactions/unified" export UNIFIED_FORMAT_TESTS_PATH=$(pwd)/../data/unified-format export URI_OPTIONS_TESTS_PATH="$(pwd)/../data/uri-options" export VERSIONED_API_TESTS_PATH=$(pwd)/../data/versioned-api @@ -988,6 +989,21 @@ tasks: - func: "install_c_driver" - func: "test" + - name: compile_and_test_with_shared_libs_sharded_cluster + commands: + - func: "setup" + - func: "start_mongod" + vars: + TOPOLOGY: "sharded_cluster" + - func: "fetch_c_driver_source" + - func: "compile" + vars: + RUN_DISTCHECK: 1 + - func: "clone_drivers-evergreen-tools" + - func: "run_kms_servers" + - func: "install_c_driver" + - func: "test" + # Auto downloading the C driver in the C++ build does not currently include # support for libmongocrypt, therefore it is not configured with # -DENABLE_CLIENT_SIDE_ENCRYPTION=ON. For now, CSFLE tests will need to have @@ -1006,6 +1022,20 @@ tasks: - func: "run_kms_servers" - func: "test" + - name: compile_and_test_with_shared_libs_sharded_cluster_with_libmongocrypt + commands: + - func: "setup" + - func: "start_mongod" + vars: + TOPOLOGY: "sharded_cluster" + - func: "install_c_driver" + - func: "compile" + vars: + RUN_DISTCHECK: 1 + - func: "clone_drivers-evergreen-tools" + - func: "run_kms_servers" + - func: "test" + - name: uninstall_check commands: - func: "setup" @@ -1314,6 +1344,12 @@ buildvariants: tasks: - name: compile_and_test_with_shared_libs_replica_set_with_libmongocrypt + - matrix_name: "integration (sharded cluster)" + matrix_spec: {os: "ubuntu-1804", mongodb_version: "*"} + display_name: "${os} sharded cluster (MongoDB ${mongodb_version})" + tasks: + - name: compile_and_test_with_shared_libs_sharded_cluster_with_libmongocrypt + - matrix_name: "auth" matrix_spec: {os: "*", mongodb_version: "latest"} display_name: "${os} ${mongodb_version} Auth" @@ -1327,15 +1363,6 @@ buildvariants: - name: test_versioned_api - name: test_versioned_api_accept_version_two - # Add matrix for specification test requirement of mongocryptd: - # "Drivers MUST run all tests with mongocryptd on at least one platform for all tested server versions (4.2+)." 
- - matrix_name: "mongocryptd" - matrix_spec: {os: "ubuntu-1804", mongodb_version: ["4.2", "4.4", "5.0", "latest"], use_mongocryptd: "true"} - display_name: "${os} (MongoDB ${mongodb_version}) with mongocryptd" - tasks: - - name: compile_and_test_with_shared_libs - - name: compile_and_test_with_shared_libs_replica_set - ####################################### # Linux Buildvariants # ####################################### @@ -1358,6 +1385,7 @@ buildvariants: - name: compile_and_test_with_static_libs - name: compile_and_test_with_static_libs_extra_alignment - name: compile_and_test_with_shared_libs_replica_set_with_libmongocrypt + - name: compile_and_test_with_shared_libs_sharded_cluster_with_libmongocrypt - name: build_example_with_add_subdirectory distros: - rhel90-large @@ -1381,6 +1409,7 @@ buildvariants: - name: compile_and_test_with_static_libs - name: compile_and_test_with_static_libs_extra_alignment - name: compile_and_test_with_shared_libs_replica_set_with_libmongocrypt + - name: compile_and_test_with_shared_libs_sharded_cluster_with_libmongocrypt - name: build_example_with_add_subdirectory distros: - rhel90-arm64-large @@ -1403,6 +1432,7 @@ buildvariants: - name: compile_and_test_with_static_libs - name: compile_and_test_with_static_libs_extra_alignment - name: compile_and_test_with_shared_libs_replica_set_with_libmongocrypt + - name: compile_and_test_with_shared_libs_sharded_cluster_with_libmongocrypt - name: build_example_with_add_subdirectory distros: - debian11-large @@ -1426,6 +1456,7 @@ buildvariants: - name: compile_and_test_with_static_libs - name: compile_and_test_with_static_libs_extra_alignment - name: compile_and_test_with_shared_libs_replica_set + - name: compile_and_test_with_shared_libs_sharded_cluster - name: build_example_with_add_subdirectory distros: - debian11-large @@ -1447,6 +1478,7 @@ buildvariants: - name: compile_and_test_with_static_libs - name: compile_and_test_with_static_libs_extra_alignment - name: compile_and_test_with_shared_libs_replica_set_with_libmongocrypt + - name: compile_and_test_with_shared_libs_sharded_cluster_with_libmongocrypt - name: build_example_with_add_subdirectory distros: - debian10-large @@ -1468,6 +1500,7 @@ buildvariants: - name: compile_and_test_with_static_libs - name: compile_and_test_with_static_libs_extra_alignment - name: compile_and_test_with_shared_libs_replica_set + - name: compile_and_test_with_shared_libs_sharded_cluster - name: build_example_with_add_subdirectory distros: - debian10-large @@ -1479,11 +1512,11 @@ buildvariants: matrix_spec: os: "ubuntu-1804" mongodb_version: ["4.2", "4.4", "5.0", "latest"] + use_mongocryptd: "true" display_name: "${os} (MongoDB ${mongodb_version}) with mongocryptd" tasks: - name: compile_and_test_with_shared_libs_replica_set_with_libmongocrypt - expansions: - use_mongocryptd: true + - name: compile_and_test_with_shared_libs_sharded_cluster_with_libmongocrypt - name: ubuntu2004-release-latest display_name: "Ubuntu 20.04 Release (MongoDB Latest)" @@ -1501,6 +1534,7 @@ buildvariants: - name: compile_and_test_with_static_libs - name: compile_and_test_with_static_libs_extra_alignment - name: compile_and_test_with_shared_libs_replica_set_with_libmongocrypt + - name: compile_and_test_with_shared_libs_sharded_cluster_with_libmongocrypt - name: build_example_with_add_subdirectory distros: - ubuntu2004-large @@ -1522,6 +1556,7 @@ buildvariants: - name: compile_and_test_with_static_libs - name: compile_and_test_with_static_libs_extra_alignment - name: 
compile_and_test_with_shared_libs_replica_set + - name: compile_and_test_with_shared_libs_sharded_cluster - name: build_example_with_add_subdirectory distros: - ubuntu2004-large @@ -1544,6 +1579,7 @@ buildvariants: - name: compile_and_test_with_static_libs - name: compile_and_test_with_static_libs_extra_alignment - name: compile_and_test_with_shared_libs_replica_set_with_libmongocrypt + - name: compile_and_test_with_shared_libs_sharded_cluster_with_libmongocrypt - name: build_example_with_add_subdirectory distros: - ubuntu1804-build @@ -1565,6 +1601,7 @@ buildvariants: - name: compile_and_test_with_static_libs - name: compile_and_test_with_static_libs_extra_alignment - name: compile_and_test_with_shared_libs_replica_set + - name: compile_and_test_with_shared_libs_sharded_cluster - name: build_example_with_add_subdirectory distros: - ubuntu1804-build diff --git a/data/transactions/abort.json b/data/transactions/legacy/abort.json similarity index 100% rename from data/transactions/abort.json rename to data/transactions/legacy/abort.json diff --git a/data/transactions/bulk.json b/data/transactions/legacy/bulk.json similarity index 100% rename from data/transactions/bulk.json rename to data/transactions/legacy/bulk.json diff --git a/data/transactions/causal-consistency.json b/data/transactions/legacy/causal-consistency.json similarity index 100% rename from data/transactions/causal-consistency.json rename to data/transactions/legacy/causal-consistency.json diff --git a/data/transactions/commit.json b/data/transactions/legacy/commit.json similarity index 100% rename from data/transactions/commit.json rename to data/transactions/legacy/commit.json diff --git a/data/transactions/count.json b/data/transactions/legacy/count.json similarity index 100% rename from data/transactions/count.json rename to data/transactions/legacy/count.json diff --git a/data/transactions/create-collection.json b/data/transactions/legacy/create-collection.json similarity index 100% rename from data/transactions/create-collection.json rename to data/transactions/legacy/create-collection.json diff --git a/data/transactions/create-index.json b/data/transactions/legacy/create-index.json similarity index 100% rename from data/transactions/create-index.json rename to data/transactions/legacy/create-index.json diff --git a/data/transactions/delete.json b/data/transactions/legacy/delete.json similarity index 100% rename from data/transactions/delete.json rename to data/transactions/legacy/delete.json diff --git a/data/transactions/error-labels.json b/data/transactions/legacy/error-labels.json similarity index 99% rename from data/transactions/error-labels.json rename to data/transactions/legacy/error-labels.json index 2d3eed3ccc..0be19c731c 100644 --- a/data/transactions/error-labels.json +++ b/data/transactions/legacy/error-labels.json @@ -10,7 +10,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", @@ -102,7 +103,7 @@ } }, { - "description": "NotMaster errors contain transient label", + "description": "NotWritablePrimary errors contain transient label", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -962,12 +963,12 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git 
a/data/transactions/legacy/errors-client.json b/data/transactions/legacy/errors-client.json new file mode 100644 index 0000000000..15fae96fec --- /dev/null +++ b/data/transactions/legacy/errors-client.json @@ -0,0 +1,96 @@ +{ + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topology": [ + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "Client side error in command starting transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "x": 1 + } + }, + "error": true + }, + { + "name": "assertSessionTransactionState", + "object": "testRunner", + "arguments": { + "session": "session0", + "state": "starting" + } + } + ] + }, + { + "description": "Client side error when transaction is in progress", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "x": 1 + } + }, + "error": true + }, + { + "name": "assertSessionTransactionState", + "object": "testRunner", + "arguments": { + "session": "session0", + "state": "in_progress" + } + } + ] + } + ] +} diff --git a/data/transactions/errors.json b/data/transactions/legacy/errors.json similarity index 100% rename from data/transactions/errors.json rename to data/transactions/legacy/errors.json diff --git a/data/transactions/findOneAndDelete.json b/data/transactions/legacy/findOneAndDelete.json similarity index 100% rename from data/transactions/findOneAndDelete.json rename to data/transactions/legacy/findOneAndDelete.json diff --git a/data/transactions/findOneAndReplace.json b/data/transactions/legacy/findOneAndReplace.json similarity index 100% rename from data/transactions/findOneAndReplace.json rename to data/transactions/legacy/findOneAndReplace.json diff --git a/data/transactions/findOneAndUpdate.json b/data/transactions/legacy/findOneAndUpdate.json similarity index 100% rename from data/transactions/findOneAndUpdate.json rename to data/transactions/legacy/findOneAndUpdate.json diff --git a/data/transactions/insert.json b/data/transactions/legacy/insert.json similarity index 100% rename from data/transactions/insert.json rename to data/transactions/legacy/insert.json diff --git a/data/transactions/isolation.json b/data/transactions/legacy/isolation.json similarity index 100% rename from data/transactions/isolation.json rename to data/transactions/legacy/isolation.json diff --git a/data/transactions/mongos-pin-auto.json b/data/transactions/legacy/mongos-pin-auto.json similarity index 99% rename from data/transactions/mongos-pin-auto.json rename to data/transactions/legacy/mongos-pin-auto.json index f6ede52687..037f212f49 100644 --- a/data/transactions/mongos-pin-auto.json +++ b/data/transactions/legacy/mongos-pin-auto.json @@ -4,7 +4,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", diff --git a/data/transactions/mongos-recovery-token.json b/data/transactions/legacy/mongos-recovery-token.json 
similarity index 98% rename from data/transactions/mongos-recovery-token.json rename to data/transactions/legacy/mongos-recovery-token.json index 50c7349c1e..da4e9861d1 100644 --- a/data/transactions/mongos-recovery-token.json +++ b/data/transactions/legacy/mongos-recovery-token.json @@ -4,7 +4,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", @@ -179,6 +180,9 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, "errmsg": "Replication is being shut down" @@ -303,7 +307,8 @@ "data": { "failCommands": [ "commitTransaction", - "isMaster" + "isMaster", + "hello" ], "closeConnection": true } diff --git a/data/transactions/pin-mongos.json b/data/transactions/legacy/pin-mongos.json similarity index 99% rename from data/transactions/pin-mongos.json rename to data/transactions/legacy/pin-mongos.json index 5eb4fc57d9..485a3d9322 100644 --- a/data/transactions/pin-mongos.json +++ b/data/transactions/legacy/pin-mongos.json @@ -4,7 +4,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", @@ -875,7 +876,7 @@ "failCommands": [ "commitTransaction" ], - "errorCode": 50 + "errorCode": 51 } } } @@ -887,7 +888,7 @@ "errorLabelsOmit": [ "TransientTransactionError" ], - "errorCode": 50 + "errorCode": 51 } }, { @@ -1106,7 +1107,8 @@ "data": { "failCommands": [ "insert", - "isMaster" + "isMaster", + "hello" ], "closeConnection": true } diff --git a/data/transactions/read-concern.json b/data/transactions/legacy/read-concern.json similarity index 100% rename from data/transactions/read-concern.json rename to data/transactions/legacy/read-concern.json diff --git a/data/transactions/read-pref.json b/data/transactions/legacy/read-pref.json similarity index 100% rename from data/transactions/read-pref.json rename to data/transactions/legacy/read-pref.json diff --git a/data/transactions/reads.json b/data/transactions/legacy/reads.json similarity index 100% rename from data/transactions/reads.json rename to data/transactions/legacy/reads.json diff --git a/data/transactions/retryable-abort-errorLabels.json b/data/transactions/legacy/retryable-abort-errorLabels.json similarity index 100% rename from data/transactions/retryable-abort-errorLabels.json rename to data/transactions/legacy/retryable-abort-errorLabels.json diff --git a/data/transactions/retryable-abort.json b/data/transactions/legacy/retryable-abort.json similarity index 98% rename from data/transactions/retryable-abort.json rename to data/transactions/legacy/retryable-abort.json index 5a3aaa7bf8..13cc7c88fb 100644 --- a/data/transactions/retryable-abort.json +++ b/data/transactions/legacy/retryable-abort.json @@ -402,7 +402,7 @@ } }, { - "description": "abortTransaction succeeds after NotMaster", + "description": "abortTransaction succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -506,7 +506,7 @@ } }, { - "description": "abortTransaction succeeds after NotMasterOrSecondary", + "description": "abortTransaction succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -610,7 +610,7 @@ } }, { - "description": "abortTransaction succeeds after NotMasterNoSlaveOk", + "description": "abortTransaction succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1556,11 +1556,11 @@ 
"failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11600, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1673,11 +1673,11 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11602, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1790,11 +1790,11 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 189, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1907,11 +1907,11 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } diff --git a/data/transactions/retryable-commit-errorLabels.json b/data/transactions/legacy/retryable-commit-errorLabels.json similarity index 100% rename from data/transactions/retryable-commit-errorLabels.json rename to data/transactions/legacy/retryable-commit-errorLabels.json diff --git a/data/transactions/retryable-commit.json b/data/transactions/legacy/retryable-commit.json similarity index 99% rename from data/transactions/retryable-commit.json rename to data/transactions/legacy/retryable-commit.json index 4895c6e0c2..49148c62d2 100644 --- a/data/transactions/retryable-commit.json +++ b/data/transactions/legacy/retryable-commit.json @@ -624,7 +624,7 @@ } }, { - "description": "commitTransaction succeeds after NotMaster", + "description": "commitTransaction succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -735,7 +735,7 @@ } }, { - "description": "commitTransaction succeeds after NotMasterOrSecondary", + "description": "commitTransaction succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -846,7 +846,7 @@ } }, { - "description": "commitTransaction succeeds after NotMasterNoSlaveOk", + "description": "commitTransaction succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1855,11 +1855,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11600, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1977,11 +1977,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11602, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -2099,11 +2099,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 189, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -2221,11 +2221,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } diff --git a/data/transactions/retryable-writes.json b/data/transactions/legacy/retryable-writes.json similarity index 100% rename from data/transactions/retryable-writes.json rename to data/transactions/legacy/retryable-writes.json diff --git 
a/data/transactions/run-command.json b/data/transactions/legacy/run-command.json similarity index 98% rename from data/transactions/run-command.json rename to data/transactions/legacy/run-command.json index 413f56b198..2f2a3a8815 100644 --- a/data/transactions/run-command.json +++ b/data/transactions/legacy/run-command.json @@ -248,7 +248,6 @@ }, { "description": "run command fails with explicit secondary read preference", - "skipReason": "read preferences in database::run_command are not supported", "operations": [ { "name": "startTransaction", diff --git a/data/transactions/test_files.txt b/data/transactions/legacy/test_files.txt similarity index 96% rename from data/transactions/test_files.txt rename to data/transactions/legacy/test_files.txt index 0d5d5c4b67..d73618c027 100644 --- a/data/transactions/test_files.txt +++ b/data/transactions/legacy/test_files.txt @@ -7,6 +7,7 @@ create-collection.json create-index.json delete.json error-labels.json +errors-client.json errors.json findOneAndDelete.json findOneAndReplace.json @@ -25,7 +26,7 @@ retryable-commit-errorLabels.json retryable-commit.json retryable-writes.json run-command.json -transaction-options.json transaction-options-repl.json +transaction-options.json update.json write-concern.json diff --git a/data/transactions/transaction-options-repl.json b/data/transactions/legacy/transaction-options-repl.json similarity index 100% rename from data/transactions/transaction-options-repl.json rename to data/transactions/legacy/transaction-options-repl.json diff --git a/data/transactions/transaction-options.json b/data/transactions/legacy/transaction-options.json similarity index 100% rename from data/transactions/transaction-options.json rename to data/transactions/legacy/transaction-options.json diff --git a/data/transactions/update.json b/data/transactions/legacy/update.json similarity index 100% rename from data/transactions/update.json rename to data/transactions/legacy/update.json diff --git a/data/transactions/write-concern.json b/data/transactions/legacy/write-concern.json similarity index 100% rename from data/transactions/write-concern.json rename to data/transactions/legacy/write-concern.json diff --git a/data/transactions/unified/do-not-retry-read-in-transaction.json b/data/transactions/unified/do-not-retry-read-in-transaction.json new file mode 100644 index 0000000000..6d9dc704b8 --- /dev/null +++ b/data/transactions/unified/do-not-retry-read-in-transaction.json @@ -0,0 +1,115 @@ +{ + "description": "do not retry read in a transaction", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryReads": true + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-read-in-transaction-test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "tests": [ + { + "description": "find does not retry in a transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "startTransaction": true + }, + "commandName": "find", + "databaseName": "retryable-read-in-transaction-test" + } + } + ] + } + ] + } + ] +} diff --git a/data/transactions/unified/mongos-unpin.json b/data/transactions/unified/mongos-unpin.json new file mode 100644 index 0000000000..356f4fd9b8 --- /dev/null +++ b/data/transactions/unified/mongos-unpin.json @@ -0,0 +1,437 @@ +{ + "description": "mongos-unpin", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "mongos-unpin-db" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "mongos-unpin-db", + "documents": [] + } + ], + "_yamlAnchors": { + "anchors": 24 + }, + "tests": [ + { + "description": "unpin after TransientTransactionError error on commit", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0", + "expectError": { + "errorCode": 24, + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + }, + { + "description": "unpin on successful abort", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin after non-transient error on abort", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + 
"object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + }, + { + "description": "unpin after TransientTransactionError error on abort", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + }, + { + "description": "unpin when a new transaction is started", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin when a non-transaction write operation uses a session", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin when a non-transaction read operation uses a session", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + 
"object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + } + ] +} diff --git a/data/transactions/unified/retryable-abort-handshake.json b/data/transactions/unified/retryable-abort-handshake.json new file mode 100644 index 0000000000..4ad56e2f2f --- /dev/null +++ b/data/transactions/unified/retryable-abort-handshake.json @@ -0,0 +1,204 @@ +{ + "description": "retryable abortTransaction on handshake errors", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "AbortTransaction succeeds after handshake network error", + "skipReason": "DRIVERS-2032: Pinned servers need to be checked if they are still selectable", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session1" + }, + "expectError": { + "isError": true + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "startTransaction": true + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "abortTransaction", + "databaseName": 
"admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/data/transactions/unified/retryable-commit-handshake.json b/data/transactions/unified/retryable-commit-handshake.json new file mode 100644 index 0000000000..d9315a8fc6 --- /dev/null +++ b/data/transactions/unified/retryable-commit-handshake.json @@ -0,0 +1,211 @@ +{ + "description": "retryable commitTransaction on handshake errors", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ], + "uriOptions": { + "retryWrites": false + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "CommitTransaction succeeds after handshake network error", + "skipReason": "DRIVERS-2032: Pinned servers need to be checked if they are still selectable", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session1" + }, + "expectError": { + "isError": true + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "startTransaction": true + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + 
"_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/data/transactions/unified/test_files.txt b/data/transactions/unified/test_files.txt new file mode 100644 index 0000000000..e5382c417a --- /dev/null +++ b/data/transactions/unified/test_files.txt @@ -0,0 +1,4 @@ +do-not-retry-read-in-transaction.json +mongos-unpin.json +retryable-abort-handshake.json +retryable-commit-handshake.json diff --git a/data/with_transaction/commit-retry.json b/data/with_transaction/commit-retry.json index d4b948ce1a..02e38460d0 100644 --- a/data/with_transaction/commit-retry.json +++ b/data/with_transaction/commit-retry.json @@ -293,7 +293,7 @@ } }, { - "description": "commit is retried after commitTransaction UnknownTransactionCommitResult (NotMaster)", + "description": "commit is retried after commitTransaction UnknownTransactionCommitResult (NotWritablePrimary)", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -304,6 +304,9 @@ "commitTransaction" ], "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, diff --git a/examples/mongocxx/change_streams.cpp b/examples/mongocxx/change_streams.cpp index 204f370684..e8fe11ebff 100644 --- a/examples/mongocxx/change_streams.cpp +++ b/examples/mongocxx/change_streams.cpp @@ -27,14 +27,6 @@ namespace { -std::string get_server_version(const mongocxx::client& client) { - bsoncxx::builder::basic::document server_status{}; - server_status.append(bsoncxx::builder::basic::kvp("serverStatus", 1)); - bsoncxx::document::value output = client["test"].run_command(server_status.extract()); - - return bsoncxx::string::to_string(output.view()["version"].get_string().value); -} - // watch_forever iterates the change stream until an error occurs. void watch_forever(mongocxx::collection& collection) { mongocxx::options::change_stream options; @@ -107,11 +99,6 @@ int main(int argc, char* argv[]) { auto entry = pool.acquire(); auto collection = (*entry)[db][coll]; - if (get_server_version(*entry) < "3.6") { - std::cerr << "Change streams are only supported on Mongo versions >= 3.6." << std::endl; - return EXIT_FAILURE; - } - std::cout << "Watching for notifications on the collection " << db << "." << coll << std::endl; std::cout << "To observe a notification, try inserting a document." 
<< std::endl; diff --git a/examples/mongocxx/mongodb.com/aggregation_examples.cpp b/examples/mongocxx/mongodb.com/aggregation_examples.cpp index fa248de44a..4bc244f57c 100644 --- a/examples/mongocxx/mongodb.com/aggregation_examples.cpp +++ b/examples/mongocxx/mongodb.com/aggregation_examples.cpp @@ -31,15 +31,7 @@ using namespace mongocxx; -std::string get_server_version(const client& client) { - bsoncxx::builder::basic::document server_status{}; - server_status.append(bsoncxx::builder::basic::kvp("serverStatus", 1)); - bsoncxx::document::value output = client["test"].run_command(server_status.extract()); - - return bsoncxx::string::to_string(output.view()["version"].get_string().value); -} - -void aggregation_examples(const mongocxx::client& client, const mongocxx::database& db) { +void aggregation_examples(const mongocxx::database& db) { { // Start Aggregation Example 1 using namespace bsoncxx::builder::basic; @@ -144,16 +136,9 @@ void aggregation_examples(const mongocxx::client& client, const mongocxx::databa auto cursor = db["air_alliances"].aggregate(p, mongocxx::options::aggregate{}); // End Aggregation Example 4 - // TODO: Remove version-check (CXX-1547) - // This example uses syntax added in mongo version 3.6. Our CI - // tooling runs tests with mongo version 3.4, so this example - // fails with a query exception. CXX-1547 tracks removing this - // version check once CI tooling uses a later version of the server. - if (get_server_version(client) >= "3.6") { - auto count = std::distance(cursor.begin(), cursor.end()); - if (count != 0L) { - throw std::logic_error("wrong count in example 4"); - } + auto count = std::distance(cursor.begin(), cursor.end()); + if (count != 0L) { + throw std::logic_error("wrong count in example 4"); } } } @@ -167,8 +152,11 @@ int main() { const mongocxx::client conn{mongocxx::uri{}}; auto const db = conn["documentation_examples"]; + // SERVER-79306: Ensure the database exists for consistent behavior with sharded clusters. + conn["documentation_examples"].create_collection("dummy"); + try { - aggregation_examples(conn, db); + aggregation_examples(db); } catch (const std::logic_error& e) { std::cerr << e.what() << std::endl; return EXIT_FAILURE; diff --git a/src/mongocxx/private/index_view.hh b/src/mongocxx/private/index_view.hh index 7732bfd387..61f041c0a7 100644 --- a/src/mongocxx/private/index_view.hh +++ b/src/mongocxx/private/index_view.hh @@ -76,13 +76,31 @@ class index_view::impl { bsoncxx::stdx::optional<bsoncxx::document::value> create_one(const client_session* session, const index_model& model, const options::index_view& options) { - bsoncxx::document::value result = - create_many(session, std::vector<index_model>{model}, options); - bsoncxx::document::view result_view = result.view(); + const auto result = create_many(session, std::vector<index_model>{model}, options); + auto result_view = result.view(); + + // SERVER-78611: sharded clusters may place fields in a raw response document instead of in + // the top-level document. + if (const auto raw = result_view["raw"]) { + // There should only be a single field in the raw response with the shard connection + // string as the key. e.g.: + // { + // 'raw': { + // 'shard01/localhost:27018,27019,27020': { + // ... # Raw response fields. + // } + // } + // } + // Using a for loop for convenience.
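+ // (Iterating avoids having to know the shard host key in advance; the loop body runs + // once per field, i.e. exactly once here.)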
+ for (const auto& shard_response : raw.get_document().view()) { + result_view = shard_response.get_document().view(); + } + } + + const auto note = result_view["note"]; - if (result_view["note"] && - bsoncxx::string::to_string(result_view["note"].get_string().value) == - "all indexes already exist") { + if (note && + bsoncxx::string::to_string(note.get_string().value) == "all indexes already exist") { return bsoncxx::stdx::nullopt; } diff --git a/src/mongocxx/test/CMakeLists.txt b/src/mongocxx/test/CMakeLists.txt index 89cd216f52..d93ae628b2 100644 --- a/src/mongocxx/test/CMakeLists.txt +++ b/src/mongocxx/test/CMakeLists.txt @@ -256,7 +256,7 @@ set_tests_properties(command_monitoring_specs PROPERTIES ENVIRONMENT "COMMAND_MONITORING_TESTS_PATH=${PROJECT_SOURCE_DIR}/../../data/command-monitoring") set_tests_properties(transactions_specs PROPERTIES - ENVIRONMENT "TRANSACTIONS_TESTS_PATH=${PROJECT_SOURCE_DIR}/../../data/transactions") + ENVIRONMENT "TRANSACTIONS_LEGACY_TESTS_PATH=${PROJECT_SOURCE_DIR}/../../data/transactions/legacy") set_property(TEST transactions_specs APPEND PROPERTY ENVIRONMENT "WITH_TRANSACTION_TESTS_PATH=${PROJECT_SOURCE_DIR}/../../data/with_transaction") @@ -274,6 +274,7 @@ set_property(TEST unified_format_spec APPEND PROPERTY ENVIRONMENT "SESSION_UNIFIED_TESTS_PATH=${PROJECT_SOURCE_DIR}/../../data/sessions/unified/" "RETRYABLE_READS_UNIFIED_TESTS_PATH=${PROJECT_SOURCE_DIR}/../../data/retryable-reads/unified/" "RETRYABLE_WRITES_UNIFIED_TESTS_PATH=${PROJECT_SOURCE_DIR}/../../data/retryable-writes/unified/" + "TRANSACTIONS_UNIFIED_TESTS_PATH=${PROJECT_SOURCE_DIR}/../../data/transactions/unified" "UNIFIED_FORMAT_TESTS_PATH=${PROJECT_SOURCE_DIR}/../../data/unified-format" "VERSIONED_API_TESTS_PATH=${PROJECT_SOURCE_DIR}/../../data/versioned-api" "INDEX_MANAGEMENT_TESTS_PATH=${PROJECT_SOURCE_DIR}/../../data/index-management" diff --git a/src/mongocxx/test/client_session.cpp b/src/mongocxx/test/client_session.cpp index 2c17b8a0fb..5e26cd0ff7 100644 --- a/src/mongocxx/test/client_session.cpp +++ b/src/mongocxx/test/client_session.cpp @@ -854,6 +854,12 @@ TEST_CASE("with_transaction", "[session]") { // The following three tests are prose tests from the with_transaction spec. SECTION("prose tests for with_transaction") { SECTION("callback raises a custom error") { + // Multi-document transactions require server 4.2+. + if (compare_versions(get_server_version(test.client), "4.2") < 0) { + WARN("Skipping - MongoDB server 4.2 or newer required"); + return; + } + // Test an operation_exception REQUIRE_THROWS_MATCHES(session.with_transaction([](client_session*) { throw operation_exception{{}, "The meaning of life"}; diff --git a/src/mongocxx/test/collection.cpp b/src/mongocxx/test/collection.cpp index 86d6521f4e..25ed360caa 100644 --- a/src/mongocxx/test/collection.cpp +++ b/src/mongocxx/test/collection.cpp @@ -2676,9 +2676,9 @@ TEST_CASE("Cursor iteration", "[collection][cursor]") { type_str = "k_tailable_await"; // Improve execution time by reducing the amount of time the server waits for new - // results - // for this cursor. - opts.max_await_time(std::chrono::milliseconds{1}); + // results for this cursor. Note: may cause flaky test failures if the duration is too + // short. 
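+ // (A slightly longer wait is still quick for the test while making it less likely that + // the server returns before newly inserted documents are visible to the tailable cursor.)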
+ opts.max_await_time(std::chrono::milliseconds{10}); run_test(); } @@ -2769,6 +2769,12 @@ TEST_CASE("Ensure that the WriteConcernError 'errInfo' object is propagated", "[ client mongodb_client{uri{}, test_util::add_test_server_api()}; + if (test_util::get_topology(mongodb_client) == "sharded" && + test_util::compare_versions(test_util::get_server_version(mongodb_client), "4.1.0") < 0) { + WARN("Skipping - failCommand on mongos requires 4.1+"); + return; + } + using bsoncxx::builder::basic::sub_document; auto err_info = builder::basic::document{}; err_info.append(kvp("writeConcern", [](sub_document sub_doc) { diff --git a/src/mongocxx/test/database.cpp b/src/mongocxx/test/database.cpp index e05e837948..0816ca897e 100644 --- a/src/mongocxx/test/database.cpp +++ b/src/mongocxx/test/database.cpp @@ -344,6 +344,10 @@ TEST_CASE("Database integration tests", "[database]") { return; } + // SERVER-79306: $listLocalSessions does not behave as expected if the database does not + // already exist on sharded clusters. + database.create_collection("dummy"); + auto session1 = mongo_client.start_session(); pipeline.list_local_sessions({}); diff --git a/src/mongocxx/test/index_view.cpp b/src/mongocxx/test/index_view.cpp index 636e4b347d..ca173e1154 100644 --- a/src/mongocxx/test/index_view.cpp +++ b/src/mongocxx/test/index_view.cpp @@ -249,6 +249,15 @@ TEST_CASE("create_many", "[index_view]") { bsoncxx::document::value result = indexes.create_many(models); bsoncxx::document::view result_view = result.view(); + + // SERVER-78611: sharded clusters may place fields in a raw response document instead of in + // the top-level document. + if (const auto raw = result_view["raw"]) { + for (const auto& shard_response : raw.get_document().view()) { + result_view = shard_response.get_document().view(); + } + } + REQUIRE((result_view["numIndexesAfter"].get_int32() - result_view["numIndexesBefore"].get_int32()) == 3); @@ -383,6 +392,14 @@ TEST_CASE("drop_all", "[index_view]") { bsoncxx::document::value result = indexes.create_many(models); bsoncxx::document::view result_view = result.view(); + // SERVER-78611: sharded clusters may place fields in a raw response document instead of + // in the top-level document.
+ if (const auto raw = result_view["raw"]) { + for (const auto& shard_response : raw.get_document().view()) { + result_view = shard_response.get_document().view(); + } + } + auto cursor1 = indexes.list(); REQUIRE((unsigned)std::distance(cursor1.begin(), cursor1.end()) == models.size() + 1); REQUIRE((unsigned)(result_view["numIndexesAfter"].get_int32() - diff --git a/src/mongocxx/test/spec/monitoring.cpp b/src/mongocxx/test/spec/monitoring.cpp index 8bff829007..d0d9477295 100644 --- a/src/mongocxx/test/spec/monitoring.cpp +++ b/src/mongocxx/test/spec/monitoring.cpp @@ -120,13 +120,20 @@ void apm_checker::compare(bsoncxx::array::view expectations, CAPTURE(print_all()); for (auto expectation : expectations) { auto expected = expectation.get_document().view(); - REQUIRE(events_iter != _events.end()); + if (events_iter == _events.end()) { + FAIL("Not enough events occurred: expected exactly " + << std::distance(expectations.begin(), expectations.end()) << " events, but got " + << _events.size() << " events"); + } REQUIRE_BSON_MATCHES_V(*events_iter, expected, match_visitor); events_iter++; } - if (!allow_extra) - REQUIRE(events_iter == _events.end()); + if (!allow_extra && events_iter != _events.end()) { + FAIL_CHECK("Too many events occurred: expected exactly " + << std::distance(expectations.begin(), expectations.end()) << " events, but got " + << _events.size() << " events"); + } } void apm_checker::has(bsoncxx::array::view expectations) { @@ -145,12 +152,12 @@ bool apm_checker::should_ignore(stdx::string_view command_name) const { [command_name](stdx::string_view cmp) { return command_name == cmp; }); } -std::string apm_checker::print_all() { +std::string apm_checker::print_all() const { std::ostringstream output; output << "\n\n"; output << "APM Checker contents:\n"; for (const auto& event : _events) { - output << "APM event: " << bsoncxx::to_json(event) << '\n'; + output << "APM event: " << bsoncxx::to_json(event) << "\n\n"; } return std::move(output).str(); } diff --git a/src/mongocxx/test/spec/monitoring.hh b/src/mongocxx/test/spec/monitoring.hh index b105a3e4aa..101895f93e 100644 --- a/src/mongocxx/test/spec/monitoring.hh +++ b/src/mongocxx/test/spec/monitoring.hh @@ -49,7 +49,7 @@ class apm_checker { void clear(); void clear_events(); - std::string print_all(); + std::string print_all() const; using event_vector = std::vector<bsoncxx::document::value>; using iterator = event_vector::iterator; diff --git a/src/mongocxx/test/spec/operation.cpp b/src/mongocxx/test/spec/operation.cpp index 6336f0989b..bd6f19175f 100644 --- a/src/mongocxx/test/spec/operation.cpp +++ b/src/mongocxx/test/spec/operation.cpp @@ -125,7 +125,9 @@ bsoncxx::stdx::optional<read_preference> lookup_read_preference(document::view d client_session* operation_runner::_lookup_session(stdx::string_view key) { if (key.compare("session0") == 0) { return _session0; - } else { + } + + if (key.compare("session1") == 0) { return _session1; } @@ -134,8 +136,7 @@ client_session* operation_runner::_lookup_session(document::view doc) { if (doc["session"]) { - stdx::string_view session_name = doc["session"].get_string().value; - return _lookup_session(session_name); + return _lookup_session(doc["session"].get_string().value); } return nullptr; } @@ -1243,22 +1244,6 @@ document::value operation_runner::_run_run_command(bsoncxx::document::view opera return result.extract(); } -document::value operation_runner::_run_configure_fail_point(bsoncxx::document::view operation) { - auto arguments =
operation["arguments"].get_document().value; - auto command = arguments["failPoint"].get_document().value; - - const client_session* session = _lookup_session(arguments); - - read_preference rp; - uint32_t server_id = session->server_id(); - stdx::optional reply = (*_client)["admin"].run_command(command, server_id); - - auto result = builder::basic::document{}; - result.append(builder::basic::kvp("result", *reply)); - - return result.extract(); -} - document::value operation_runner::_create_index(const document::view& operation) { auto arguments = operation["arguments"]; auto session = _lookup_session(arguments.get_document().value); @@ -1378,17 +1363,40 @@ document::value operation_runner::run(document::view operation) { return _run_abort_transaction(operation); } else if (key.compare("runCommand") == 0) { return _run_run_command(operation); - } else if (key.compare("targetedFailPoint") == 0) { - return _run_configure_fail_point(operation); } else if (key.compare("assertSessionPinned") == 0) { const client_session* session = _lookup_session(operation["arguments"].get_document().value); - REQUIRE(session->server_id()); + REQUIRE(session); + REQUIRE(session->server_id() != 0); return empty_document; - } else if (key.compare("operationassertSessionUnpinned") == 0) { + } else if (key.compare("assertSessionUnpinned") == 0) { const client_session* session = _lookup_session(operation["arguments"].get_document().value); - REQUIRE(!session->server_id()); + REQUIRE(session); + REQUIRE(session->server_id() == 0); + return empty_document; + } else if (key.compare("assertSessionTransactionState") == 0) { + const auto arguments = operation["arguments"].get_document().value; + const client_session* session = _lookup_session(arguments); + REQUIRE(session); + const auto state = arguments["state"].get_string().value; + switch (session->get_transaction_state()) { + case client_session::transaction_state::k_transaction_none: + REQUIRE(state == stdx::string_view("none")); + break; + case client_session::transaction_state::k_transaction_starting: + REQUIRE(state == stdx::string_view("starting")); + break; + case client_session::transaction_state::k_transaction_in_progress: + REQUIRE(state == stdx::string_view("in_progress")); + break; + case client_session::transaction_state::k_transaction_committed: + REQUIRE(state == stdx::string_view("committed")); + break; + case client_session::transaction_state::k_transaction_aborted: + REQUIRE(state == stdx::string_view("aborted")); + break; + } return empty_document; } else if (key.compare("watch") == 0) { if (object.compare("collection") == 0) { @@ -1473,6 +1481,26 @@ document::value operation_runner::run(document::view operation) { operation["arguments"]["index"].get_string()); })); + return empty_document; + } else if (key.compare("targetedFailPoint") == 0) { + REQUIRE(object == stdx::string_view("testRunner")); + + const auto arguments = operation["arguments"].get_document().value; + + const auto session_ptr = _lookup_session(arguments); + REQUIRE(session_ptr); + auto& session = *session_ptr; + const auto server_id = session.server_id(); + + if (server_id == 0) { + FAIL("session object is not pinned to a mongos server"); + } + + const auto command = arguments["failPoint"].get_document().value; + REQUIRE(!command.empty()); + + session.client()["admin"].run_command(command, server_id); + return empty_document; } else { throw std::logic_error{"unsupported operation: " + string::to_string(key)}; diff --git a/src/mongocxx/test/spec/operation.hh 
b/src/mongocxx/test/spec/operation.hh index 85f56d3cb1..61f2deac41 100644 --- a/src/mongocxx/test/spec/operation.hh +++ b/src/mongocxx/test/spec/operation.hh @@ -79,7 +79,6 @@ class operation_runner { document::value _run_commit_transaction(document::view operation); document::value _run_abort_transaction(document::view operation); document::value _run_run_command(document::view operation); - document::value _run_configure_fail_point(bsoncxx::document::view operation); document::value _run_find_one(document::view operation); document::value _create_index(const document::view& operation); void _set_collection_options(document::view operation); diff --git a/src/mongocxx/test/spec/transactions.cpp b/src/mongocxx/test/spec/transactions.cpp index 322f239506..07af469f86 100644 --- a/src/mongocxx/test/spec/transactions.cpp +++ b/src/mongocxx/test/spec/transactions.cpp @@ -24,12 +24,21 @@ using namespace spec; TEST_CASE("Transactions spec automated tests", "[transactions_spec]") { instance::current(); - /* Tests that use operations that the C++ driver does not have. */ - std::set<std::string> unsupported_transaction_tests = {"count.json"}; + // Tests that use operations that the C++ driver does not have. + std::set<std::string> unsupported_transaction_tests = { + // C Driver does not support count helper. + "count.json", + }; - run_tests_in_suite( - "TRANSACTIONS_TESTS_PATH", &run_transactions_tests_in_file, unsupported_transaction_tests); + SECTION("Legacy") { + run_tests_in_suite("TRANSACTIONS_LEGACY_TESTS_PATH", + &run_transactions_tests_in_file, + unsupported_transaction_tests); + } - run_tests_in_suite("WITH_TRANSACTION_TESTS_PATH", &run_transactions_tests_in_file); + SECTION("Convenient API") { + run_tests_in_suite("WITH_TRANSACTION_TESTS_PATH", &run_transactions_tests_in_file); + } } + } // namespace diff --git a/src/mongocxx/test/spec/unified_tests/operations.cpp b/src/mongocxx/test/spec/unified_tests/operations.cpp index 5b0fa5c35d..5b8e323f3f 100644 --- a/src/mongocxx/test/spec/unified_tests/operations.cpp +++ b/src/mongocxx/test/spec/unified_tests/operations.cpp @@ -1962,6 +1962,36 @@ document::value operations::run(entity::map& entity_map, auto key = string::to_string(op["arguments"]["client"].get_string().value); return fail_point(entity_map, apm_map[key], op_view); } + if (name == "targetedFailPoint") { + REQUIRE(object == "testRunner"); + + const auto arguments = op_view["arguments"]; + + const auto fail_point = arguments["failPoint"].get_document().value; + REQUIRE(!fail_point.empty()); + + auto* const session_ptr = get_session(op_view, entity_map); + REQUIRE(session_ptr); + auto& session = *session_ptr; + const auto server_id = session.server_id(); + + // Test runners MUST error if the session is not pinned to a mongos server at the time + // this operation is executed. + if (server_id == 0) { + FAIL("session object is not pinned to a mongos server"); + } + + auto& client = session.client(); + + // If the driver exposes an API to target a specific server for a command, the test runner + // SHOULD use the client entity associated with the session to execute the + // configureFailPoint command.
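+ // (The run_command overload taking a server_id executes the command on that specific + // server, i.e. the mongos this session is pinned to.)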
+ client["admin"].run_command(fail_point, server_id); + + return make_document(kvp("uri", client.uri().to_string()), + kvp("failPoint", fail_point["configureFailPoint"].get_string()), + kvp("serverId", static_cast(server_id))); + } if (name == "findOneAndDelete") return find_one_and_delete( entity_map.get_collection(object), get_session(op_view, entity_map), op_view); @@ -2040,6 +2070,18 @@ document::value operations::run(entity::map& entity_map, auto& session = entity_map.get_client_session(session_name); return assert_session_transaction_state(session, op_view); } + if (name == "assertSessionPinned") { + auto& session = entity_map.get_client_session( + string::to_string(op["arguments"]["session"].get_string().value)); + REQUIRE(session.server_id() != 0); + return make_document(); + } + if (name == "assertSessionUnpinned") { + auto& session = entity_map.get_client_session( + string::to_string(op["arguments"]["session"].get_string().value)); + REQUIRE(session.server_id() == 0); + return make_document(); + } if (name == "dropCollection") { auto coll_name = string::to_string(op["arguments"]["collection"].get_string().value); auto& db = entity_map.get_database(object); diff --git a/src/mongocxx/test/spec/unified_tests/runner.cpp b/src/mongocxx/test/spec/unified_tests/runner.cpp index 37a821665d..5e1d28269c 100644 --- a/src/mongocxx/test/spec/unified_tests/runner.cpp +++ b/src/mongocxx/test/spec/unified_tests/runner.cpp @@ -218,14 +218,10 @@ bool equals_server_topology(const document::element& topologies) { // The server's topology will not change during the test. No need to make a round-trip for every // test file. - const static std::string actual = test_util::get_topology(); - const auto equals = [&](const bsoncxx::array::element& expected) { - return expected == value(actual) || - (expected == value("sharded") && actual == "sharded-replicaset"); - }; + const static auto actual = value(test_util::get_topology()); const auto t = topologies.get_array().value; - return std::end(t) != std::find_if(std::begin(t), std::end(t), equals); + return std::end(t) != std::find(std::begin(t), std::end(t), actual); } bool compatible_with_server(const bsoncxx::array::element& requirement) { @@ -350,43 +346,76 @@ std::string uri_options_to_string(document::view object) { return opts; } -std::string get_hostnames(document::view object) { - const auto default_uri = std::string{"localhost:27017"}; +std::string get_hostnames(bsoncxx::document::view object) { + const auto uri0 = mongocxx::uri("mongodb://localhost:27017"); + + // All test topologies should have either a mongod or mongos on localhost:27017. + const mongocxx::client client0{uri0, test_util::add_test_server_api()}; + REQUIRE_NOTHROW(client0.list_databases().begin()); + + // The topology must be consistent with what was set up by the test environment. + static constexpr auto one = "localhost:27017"; + static constexpr auto two = "localhost:27017,localhost:27018"; + static constexpr auto three = "localhost:27017,localhost:27018,localhost:27019"; + + const auto topology = test_util::get_topology(client0); - // Spec: This [useMultipleMongoses] option has no effect for non-sharded topologies. - if (!test_util::is_sharded_cluster()) { - return default_uri; + if (topology == "single") { + return one; // Single mongod. } - // Spec: If true and the topology is a sharded cluster, the test runner MUST assert that this - // MongoClient connects to multiple mongos hosts (e.g. by inspecting the connection string). 
- if (!object["useMultipleMongoses"] || !object["useMultipleMongoses"].get_bool()) - return default_uri; - - // from: https://www.mongodb.com/docs/manual/reference/config-database/#config.shards - // If the shard is a replica set, the host field displays the name of the replica set, then a - // slash, then a comma-separated list of the hostnames of each member of the replica set, as in - // the following example: - // { ... , "host" : "shard0001/localhost:27018,localhost:27019,localhost:27020", ... } - const auto host = test_util::get_hosts(); - const auto after_slash = ++std::find(std::begin(host), std::end(host), '/'); - REQUIRE(after_slash < std::end(host)); - - const auto hostnames = std::string{after_slash, std::end(host)}; - CAPTURE(host, hostnames); - - // require multiple mongos hosts - REQUIRE(std::end(hostnames) != std::find(std::begin(hostnames), std::end(hostnames), ',')); - return hostnames; + if (topology == "replicaset") { + return three; // Three replset members. + } + + if (topology == "sharded") { + const auto use_multiple_mongoses = object["useMultipleMongoses"]; + + if (use_multiple_mongoses) { + const auto value = use_multiple_mongoses.get_bool().value; + + if (value) { + const auto uri1 = mongocxx::uri("mongodb://localhost:27018"); + + // If true and the topology is a sharded cluster, the test runner MUST assert that + // this MongoClient connects to multiple mongos hosts (e.g. by inspecting the + // connection string). + const mongocxx::client client1{uri1, test_util::add_test_server_api()}; + + if (!client0["config"].has_collection("shards")) { + FAIL("missing required mongos on port 27017 with useMultipleMongoses=true"); + } + + if (!client1["config"].has_collection("shards")) { + FAIL("missing required mongos on port 27018 with useMultipleMongoses=true"); + } + + return two; // Two mongoses. + } else { + // If false and the topology is a sharded cluster, the test runner MUST ensure that + // this MongoClient connects to only a single mongos host (e.g. by modifying the + // connection string). + return one; // Single mongos. + } + } else { + // If this option is not specified and the topology is a sharded cluster, the test + // runner MUST NOT enforce any limit on the number of mongos hosts in the connection + // string and any tests using this client SHOULD NOT depend on a particular number of + // mongos hosts. + + // But we still only support exactly two mongoses. + return two; // Two mongoses. 
+ } + } + + FAIL("unexpected topology: " << topology); + return {}; // -Wreturn-type } -void add_observe_events(options::apm& apm_opts, document::view object) { - using types::bson_value::value; - if (!object["observeEvents"]) +void add_observe_events(spec::apm_checker& apm, options::apm& apm_opts, document::view object) { + if (!object["observeEvents"]) { return; - - const auto name = string::to_string(object["id"].get_string().value); - auto& apm = get_apm_map()[name]; + } const auto observe_sensitive = object["observeSensitiveCommands"]; apm.observe_sensitive_events = observe_sensitive && observe_sensitive.get_bool(); @@ -407,13 +436,13 @@ void add_observe_events(options::apm& apm_opts, document::view object) { } } -void add_ignore_command_monitoring_events(document::view object) { - if (!object["ignoreCommandMonitoringEvents"]) +void add_ignore_command_monitoring_events(spec::apm_checker& apm, document::view object) { + if (!object["ignoreCommandMonitoringEvents"]) { return; + } + for (auto cme : object["ignoreCommandMonitoringEvents"].get_array().value) { CAPTURE(cme.get_string()); - const auto name = string::to_string(object["id"].get_string().value); - auto& apm = get_apm_map()[name]; apm.set_ignore_command_monitoring_event(string::to_string(cme.get_string().value)); } } @@ -650,8 +679,14 @@ client create_client(document::view object) { client_opts = test_util::add_test_server_api(); } - add_observe_events(apm_opts, object); - add_ignore_command_monitoring_events(object); + auto& apm = get_apm_map()[string::to_string(object["id"].get_string().value)]; + + add_observe_events(apm, apm_opts, object); + add_ignore_command_monitoring_events(apm, object); + + // The test runner MUST also ensure that the configureFailPoint command is excluded from the + // list of observed command monitoring events for this client (if applicable). + apm.set_ignore_command_monitoring_event("configureFailPoint"); CAPTURE(conn); return client{uri{conn}, client_opts.apm_opts(apm_opts)}; @@ -951,8 +986,15 @@ void assert_outcome(const array::element& test) { using std::end; using std::equal; - if (!test["outcome"]) + if (!test["outcome"]) { return; + } + + read_preference rp; + rp.mode(read_preference::read_mode::k_primary); + + read_concern rc; + rc.acknowledge_level(read_concern::level::k_local); for (const auto& outcome : test["outcome"].get_array().value) { CAPTURE(to_json(outcome.get_document())); @@ -964,6 +1006,31 @@ void assert_outcome(const array::element& test) { const auto db = get_entity_map().get_database_by_name(db_name); auto coll = db.collection(coll_name); + struct coll_state_guard_type { + mongocxx::collection& coll; + read_preference old_rp; + read_concern old_rc; + + coll_state_guard_type(mongocxx::collection& coll) : coll(coll) { + old_rp = coll.read_preference(); + old_rc = coll.read_concern(); + } + + ~coll_state_guard_type() { + try { + coll.read_preference(old_rp); + coll.read_concern(old_rc); + } catch (...) { + } + } + } coll_state_guard(coll); + + // The test runner MUST query each collection using the internal MongoClient, an ascending + // sort order on the `_id` field (i.e. `{ _id: 1 }`), a "primary" read preference, and a + // "local" read concern. 
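+        // coll_state_guard (above) restores the collection's original read preference and read concern once the outcome has been verified, even if an assertion below throws.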
+ coll.read_preference(rp); + coll.read_concern(rc); + auto results = coll.find({}, options::find{}.sort(make_document(kvp("_id", 1)))); auto actual = results.begin(); @@ -977,18 +1044,55 @@ void assert_outcome(const array::element& test) { } } -struct disable_fail_point { +struct fail_point_guard_type { std::vector<std::pair<std::string, std::string>> fail_points; - void add_fail_point(const std::string& uri, const std::string& command) { - fail_points.emplace_back(uri, command); + fail_point_guard_type() = default; + + ~fail_point_guard_type() { + try { + for (const auto& f : fail_points) { + spec::disable_fail_point(f.first, {}, f.second); + } + } catch (...) { + } } - void operator()() const { - for (auto&& f : fail_points) { - spec::disable_fail_point(f.first, {}, f.second); + void add_fail_point(std::string uri, std::string command) { + fail_points.emplace_back(std::move(uri), std::move(command)); + } +}; + +void disable_targeted_fail_point(mongocxx::stdx::string_view uri, + std::uint32_t server_id, + mongocxx::stdx::string_view fail_point) { + const auto command_owner = + make_document(kvp("configureFailPoint", fail_point), kvp("mode", "off")); + const auto command = command_owner.view(); + + // Unlike in the legacy test runner, there are no tests (at time of writing) that require + // multiple attempts to disable a targetedFailPoint, so only one attempt should suffice. + mongocxx::client client = {mongocxx::uri{uri}, test_util::add_test_server_api()}; + client["admin"].run_command(command, server_id); +} + +struct targeted_fail_point_guard_type { + std::vector<std::tuple<std::string, std::uint32_t, std::string>> fail_points; + + targeted_fail_point_guard_type() = default; + + ~targeted_fail_point_guard_type() { + try { + for (const auto& f : fail_points) { + disable_targeted_fail_point(std::get<0>(f), std::get<1>(f), std::get<2>(f)); + } + } catch (...) { } } + + void add_fail_point(std::string uri, std::uint32_t server_id, std::string command) { + fail_points.emplace_back(std::move(uri), server_id, std::move(command)); + } }; document::value bulk_write_result(const mongocxx::bulk_write_exception& e) { @@ -1058,7 +1162,8 @@ void run_tests(mongocxx::stdx::string_view test_description, document::view test continue; } - disable_fail_point disable_fail_point_fn{}; + fail_point_guard_type fail_point_guard; + targeted_fail_point_guard_type targeted_fail_point_guard; for (auto&& apm : get_apm_map()) { apm.second.clear_events(); @@ -1073,16 +1178,25 @@ void run_tests(mongocxx::stdx::string_view test_description, document::view test }(); try { - auto result = bsoncxx::builder::basic::make_document(); - result = operations::run(get_entity_map(), get_apm_map(), ops, state); + const auto result = + operations::run(get_entity_map(), get_apm_map(), ops, state); if (string::to_string(ops["object"].get_string().value) == "testRunner") { - if (string::to_string(ops["name"].get_string().value) == "failPoint") { - disable_fail_point_fn.add_fail_point( + const auto op_name = string::to_string(ops["name"].get_string().value); + + if (op_name == "failPoint") { + fail_point_guard.add_fail_point( string::to_string(result["uri"].get_string().value), string::to_string(result["failPoint"].get_string().value)); } + if (op_name == "targetedFailPoint") { + targeted_fail_point_guard.add_fail_point( + string::to_string(result["uri"].get_string().value), + static_cast<std::uint32_t>(result["serverId"].get_int64().value), + string::to_string(result["failPoint"].get_string().value)); + } + + // Special test operations return no result and are always expected to
These operations SHOULD NOT be combined with expectError, // expectResult, or saveResultAsEntity. @@ -1125,7 +1239,6 @@ void run_tests(mongocxx::stdx::string_view test_description, document::view test } } } - disable_fail_point_fn(); assert_events(ele); assert_outcome(ele); @@ -1231,6 +1344,10 @@ TEST_CASE("retryable writes unified format spec automated tests", "[unified_form CHECK(run_unified_format_tests_in_env_dir("RETRYABLE_WRITES_UNIFIED_TESTS_PATH")); } +TEST_CASE("transactions unified format spec automated tests", "[unified_format_spec]") { + CHECK(run_unified_format_tests_in_env_dir("TRANSACTIONS_UNIFIED_TESTS_PATH")); +} + TEST_CASE("versioned API spec automated tests", "[unified_format_spec]") { CHECK(run_unified_format_tests_in_env_dir("VERSIONED_API_TESTS_PATH")); } diff --git a/src/mongocxx/test/spec/util.cpp b/src/mongocxx/test/spec/util.cpp index d896ed2a98..8236ab65a8 100644 --- a/src/mongocxx/test/spec/util.cpp +++ b/src/mongocxx/test/spec/util.cpp @@ -29,7 +29,10 @@ #include #include #include +#include +#include #include +#include #include #include #include @@ -93,7 +96,11 @@ bool should_skip_spec_test(const client& client, document::view test) { "Deprecated count with collation", "Deprecated count without a filter", "Deprecated count with a filter", - "Deprecated count with skip and limit"}; + "Deprecated count with skip and limit", + + // CXX-2678: missing required runCommand interface to set readPreference. + "run command fails with explicit secondary read preference", + }; if (test["description"]) { std::string description = std::string(test["description"].get_string().value); @@ -205,6 +212,23 @@ void disable_fail_point(const client& client, stdx::string_view fail_point) { } } +struct fail_point_guard_type { + mongocxx::client& client; + std::string name; + + fail_point_guard_type(mongocxx::client& client, std::string name) + : client(client), name(std::move(name)) {} + + ~fail_point_guard_type() { + disable_fail_point(client, name); + } + + fail_point_guard_type(const fail_point_guard_type&) = delete; + fail_point_guard_type& operator=(const fail_point_guard_type&) = delete; + fail_point_guard_type(fail_point_guard_type&&) = delete; + fail_point_guard_type& operator=(fail_point_guard_type&&) = delete; +}; + void disable_fail_point(std::string uri_string, options::client client_opts, stdx::string_view fail_point) { @@ -212,6 +236,47 @@ void disable_fail_point(std::string uri_string, disable_fail_point(client, fail_point); } +void disable_targeted_fail_point(std::uint32_t server_id) { + const auto command_owner = + make_document(kvp("configureFailPoint", "failCommand"), kvp("mode", "off")); + const auto command = command_owner.view(); + + // Some transactions tests have a failCommand for "hello" repeat seven times. + for (int i = 0; i < kMaxHelloFailCommands; i++) { + try { + // Create a new client for every attempt to force server discovery from scratch to + // guarantee the hello or isMaster fail points are actually triggered on the required + // mongos. + mongocxx::client client = {uri{"mongodb://localhost:27017,localhost:27018"}, + test_util::add_test_server_api()}; + client["admin"].run_command(command, server_id); + break; + } catch (...) { + continue; + } + } +} + +struct targeted_fail_point_guard_type { + std::uint32_t server_id; + + targeted_fail_point_guard_type(std::uint32_t server_id) : server_id(server_id) { + REQUIRE(server_id != 0); + } + + ~targeted_fail_point_guard_type() { + try { + disable_targeted_fail_point(server_id); + } catch (...) 
{ + } + } + + targeted_fail_point_guard_type(const targeted_fail_point_guard_type&) = delete; + targeted_fail_point_guard_type& operator=(const targeted_fail_point_guard_type&) = delete; + targeted_fail_point_guard_type(targeted_fail_point_guard_type&&) = delete; + targeted_fail_point_guard_type& operator=(targeted_fail_point_guard_type&&) = delete; +}; + void set_up_collection(const client& client, document::view test, string_view database_name, @@ -219,43 +284,52 @@ void set_up_collection(const client& client, write_concern wc_majority; wc_majority.acknowledge_level(write_concern::level::k_majority); - auto db = client[test[database_name].get_string().value]; - db.drop(); + const auto db_name = test[database_name].get_string().value; + const auto coll_name = test[collection_name].get_string().value; + // Create a collection object from the MongoClient, using the `database_name` and + // `collection_name` fields of the YAML file. + auto db = client[db_name]; + auto coll = db[coll_name]; + + // For compatibility with Client Side Encryption tests. bsoncxx::builder::basic::document opts; if (const auto ef = test["encrypted_fields"]) { opts.append(kvp("encryptedFields", ef.get_document().value)); } - auto coll_name = test[collection_name].get_string().value; - auto coll = db[coll_name]; - + // Drop the test collection, using writeConcern "majority". coll.drop(wc_majority, opts.view()); - coll = db.create_collection(coll_name, opts.view(), wc_majority); - // Set up JSON schema, if we have one + // For compatibility with Client Side Encryption tests. if (test["json_schema"]) { - validation_criteria validation{}; - validation.rule(test["json_schema"].get_document().value); - - auto cmd = bsoncxx::builder::basic::document{}; - cmd.append(kvp("collMod", coll_name)); - cmd.append(kvp("validator", [&](bsoncxx::builder::basic::sub_document subdoc) { - subdoc.append(kvp("$jsonSchema", test["json_schema"].get_document().value)); - })); - - db.run_command(cmd.extract()); + opts.append( + kvp("validator", + make_document(kvp("$jsonSchema", test["json_schema"].get_document().value)))); } + // Execute the "create" command to recreate the collection, using writeConcern "majority". + coll = db.create_collection(coll_name, opts.view(), wc_majority); + // Seed collection with data, if we have it - if (test["data"]) { + if (const auto data = test["data"]) { options::insert insert_opts; insert_opts.write_concern(wc_majority); - for (auto&& doc : test["data"].get_array().value) { + for (auto&& doc : data.get_array().value) { coll.insert_one(doc.get_document().value, insert_opts); } } + + // When testing against a sharded cluster run a `distinct` command on the newly created + // collection on all mongoses. 
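+    // Priming each mongos with a distinct command refreshes its cached database version; per the transactions spec test instructions, this avoids spurious StaleDbVersion errors in the transactions that follow.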
+ if (test_util::is_sharded_cluster(client)) { + auto s0 = mongocxx::client(uri("mongodb://localhost:27017")); + auto s1 = mongocxx::client(uri("mongodb://localhost:27018")); + + s0[db_name][coll_name].distinct("x", {}); + s1[db_name][coll_name].distinct("x", {}); + } } void initialize_collection(collection* coll, array::view initial_data) { @@ -411,7 +485,32 @@ void run_operation_check_result(document::view op, make_op_runner_fn make_op_run } uri get_uri(document::view test) { - std::string uri_string = "mongodb://localhost/?"; + std::string uri_string = "mongodb://localhost:27017/?"; + + if (test_util::is_sharded_cluster()) { + const auto use_multiple_mongoses = test["useMultipleMongoses"]; + if (use_multiple_mongoses && use_multiple_mongoses.get_bool().value) { + // If true, and the topology type is Sharded, the MongoClient for this test should be + // initialized with multiple mongos seed addresses. If false or omitted, only a single + // mongos address should be specified. + uri_string = "mongodb://localhost:27017,localhost:27018/?"; + + // Verify that both mongos are actually present. + const mongocxx::client client0 = {uri{"mongodb://localhost:27017"}, + test_util::add_test_server_api()}; + const mongocxx::client client1 = {uri{"mongodb://localhost:27018"}, + test_util::add_test_server_api()}; + + if (!client0["config"].has_collection("shards")) { + FAIL("missing required mongos on port 27017 with useMultipleMongoses=true"); + } + + if (!client1["config"].has_collection("shards")) { + FAIL("missing required mongos on port 27018 with useMultipleMongoses=true"); + } + } + } + auto add_opt = [&uri_string](std::string opt) { if (uri_string.back() != '?') { uri_string += '&'; @@ -483,60 +582,75 @@ void run_tests_in_suite(std::string ev, test_runner cb) { run_tests_in_suite(ev, cb, empty); } -void test_setup(document::view test, document::view test_spec) { - // Step 1. "clean up any open transactions from previous test failures" - client client{uri{}, test_util::add_test_server_api()}; +static void test_setup(document::view test, document::view test_spec) { + // Step 2: Create a MongoClient and call `client.admin.runCommand({killAllSessions: []})` to + // clean up any open transactions from previous test failures. + client client{get_uri(test), test_util::add_test_server_api()}; try { client["admin"].run_command(make_document(kvp("killAllSessions", make_array()))); - } catch (const operation_exception& e) { + } catch (const mongocxx::exception& e) { + // Ignore a command failure with error code 11601 ("Interrupted") to work around + // SERVER-38335. + if (e.code() != server_error_code(11601)) { + FAIL("unexpected exception during killAllSessions: " << e.what()); + } } - // Steps 2 - 5, set up new collection + // Steps 2-7. set_up_collection(client, test_spec); - // Step 6. "If failPoint is specified, its value is a configureFailPoint command" + // Step 8: If failPoint is specified, its value is a configureFailPoint command. 
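+    // The fail point configured here is disabled after the test by the fail_point_guard_type guard set up in run_transactions_tests_in_file, not by test_setup itself.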
configure_fail_point(client, test); } void parse_session_opts(document::view session_opts, options::client_session* out) { options::transaction txn_opts; if (session_opts["defaultTransactionOptions"]) { - auto rc = lookup_read_concern(session_opts["defaultTransactionOptions"].get_document()); - if (rc) { + if (auto rc = + lookup_read_concern(session_opts["defaultTransactionOptions"].get_document())) { txn_opts.read_concern(*rc); } - auto wc = lookup_write_concern(session_opts["defaultTransactionOptions"].get_document()); - if (wc) { + if (auto wc = + lookup_write_concern(session_opts["defaultTransactionOptions"].get_document())) { txn_opts.write_concern(*wc); } - auto rp = lookup_read_preference(session_opts["defaultTransactionOptions"].get_document()); - if (rp) { + if (auto rp = + lookup_read_preference(session_opts["defaultTransactionOptions"].get_document())) { txn_opts.read_preference(*rp); } + + if (auto cc = session_opts["causalConsistency"]) { + out->causal_consistency(cc.get_bool()); + } + + if (auto mct = session_opts["maxCommitTimeMS"]) { + txn_opts.max_commit_time_ms(std::chrono::milliseconds(mct.get_int64())); + } } out->default_transaction_opts(txn_opts); } using bsoncxx::stdx::string_view; -void run_transaction_operations(document::view test, - client* client, - string_view db_name, - string_view coll_name, - client_session* session0, - client_session* session1, - bool* fail_point_enabled, - bool throw_on_error = false) { +void run_transaction_operations( + document::view test, + client* client, + string_view db_name, + string_view coll_name, + client_session* session0, + client_session* session1, + stdx::optional<targeted_fail_point_guard_type>* targeted_fail_point_guard, + const apm_checker& apm_checker, + bool throw_on_error = false) { auto operations = test["operations"].get_array().value; REQUIRE(session0); REQUIRE(session1); + REQUIRE(targeted_fail_point_guard); for (auto&& op : operations) { - *fail_point_enabled = - *fail_point_enabled || op.get_document().value["arguments"]["failPoint"]; std::string error_msg; optional<document::value> server_error; optional<operation_exception> exception; @@ -544,16 +658,22 @@ void run_transaction_operations(document::view test, std::error_code ec; INFO("Operation: " << bsoncxx::to_json(op.get_document().value)); - auto operation = op.get_document().value; + const auto operation = op.get_document().value; // Handle with_transaction separately. if (operation["name"].get_string().value.compare("withTransaction") == 0) { - auto session = [&]() { - if (operation["object"].get_string().value.compare("session0") == 0) { + const auto session = [&]() -> mongocxx::client_session* { + const auto object = operation["object"].get_string().value; + if (object.compare("session0") == 0) { return session0; - } else { + } + + if (object.compare("session1") == 0) { return session1; } + + FAIL("unexpected session object: " << object); + return nullptr; // -Wreturn-type }(); auto with_txn_test_cb = [&](client_session*) { @@ -568,7 +688,8 @@ void run_transaction_operations(document::view test, coll_name, session0, session1, - fail_point_enabled, + targeted_fail_point_guard, + apm_checker, true); }; @@ -579,23 +700,79 @@ void run_transaction_operations(document::view test, server_error = e.raw_server_error(); exception = e; ec = e.code(); + } catch (const mongocxx::logic_error& e) { + // CXX-1679: some tests trigger client errors that are thrown as logic_error rather + // than operation_exception (i.e. update without $ operator).
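+                // Populate `exception` with a generic driver error code so the result checks below register that the operation failed; client-side errors carry no server error document.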
+ error_msg = e.what(); + exception.emplace(make_error_code(mongocxx::error_code(0))); + ec = e.code(); } } else { try { - database db = client->database(db_name); + // Create a Database object from the MongoClient, using the `database_name` field at + // the top level of the test file. + auto db = client->database(db_name); parse_database_options(operation, &db); - collection coll = db[coll_name]; + + // Create a Collection object from the Database, using the `collection_name` field + // at the top level of the test file. If collectionOptions or databaseOptions is + // present, create the Collection or Database object with the provided options, + // respectively. Otherwise create the object with the default options. + auto coll = db[coll_name]; parse_collection_options(operation, &coll); - operation_runner op_runner{&db, &coll, session0, session1, client}; - actual_result = op_runner.run(operation); + + // Set the targetedFailPoint guard early to account for the possibility of an + // exception being thrown after the targetedFailPoint operation but before + // activating the disable guard. There is no harm attempting to disable a fail point + // that hasn't been set. + if (operation["name"] && operation["name"].get_string().value == + stdx::string_view("targetedFailPoint")) { + const auto arguments = operation["arguments"]; + + const auto session = [&]() -> mongocxx::client_session* { + const auto value = arguments["session"].get_string().value; + if (value == stdx::string_view("session0")) { + return session0; + } + if (value == stdx::string_view("session1")) { + return session1; + } + FAIL("unexpected session name: " << value); + return nullptr; // -Wreturn-type + }(); + + // We expect and assume the name of the fail point is always "failCommand". To + // date, *all* legacy spec tests use "failCommand" as the fail point name. + REQUIRE(arguments["failPoint"]["configureFailPoint"].get_string().value == + stdx::string_view("failCommand")); + + // We expect at most one targetedFailPoint operation per test case. + REQUIRE(!(*targeted_fail_point_guard)); + + // When executing this operation, the test runner MUST keep a record of both the + // fail point and pinned mongos server so that the fail point can be disabled on + // the same mongos server after the test. + targeted_fail_point_guard->emplace(session->server_id()); + } + + actual_result = + operation_runner{&db, &coll, session0, session1, client}.run(operation); } catch (const operation_exception& e) { error_msg = e.what(); server_error = e.raw_server_error(); exception = e; ec = e.code(); + } catch (const mongocxx::logic_error& e) { + // CXX-1679: some tests trigger client errors that are thrown as logic_error rather + // than operation_exception (i.e. update without $ operator). + error_msg = e.what(); + exception.emplace(make_error_code(mongocxx::error_code(0))); + ec = e.code(); } } + CAPTURE(apm_checker.print_all()); + // "If the result document has an 'errorContains' field, verify that the method threw an // exception or returned an error, and that the value of the 'errorContains' field // matches the error string." @@ -604,7 +781,8 @@ void run_transaction_operations(document::view test, // Do a case insensitive check. 
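+        // Catch's Contains matcher with CaseSensitive::No (below) replaces the manual tolowercase + find comparison and reports a clearer message on failure.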
auto error_contains = test_util::tolowercase(op["result"]["errorContains"].get_string().value); - REQUIRE(test_util::tolowercase(error_msg).find(error_contains) < error_msg.length()); + + REQUIRE_THAT(error_msg, Catch::Contains(error_contains, Catch::CaseSensitive::No)); } // "If the result document has an 'errorCodeName' field, verify that the method threw a @@ -665,134 +843,162 @@ void run_transaction_operations(document::view test, void run_transactions_tests_in_file(const std::string& test_path) { INFO("Test path: " << test_path); - auto test_spec = test_util::parse_test_file(test_path); + + const auto test_spec = test_util::parse_test_file(test_path); REQUIRE(test_spec); - auto test_spec_view = test_spec->view(); - auto db_name = test_spec_view["database_name"].get_string().value; - auto coll_name = test_spec_view["collection_name"].get_string().value; - auto tests = test_spec_view["tests"].get_array().value; + + const auto test_spec_view = test_spec->view(); + const auto db_name = test_spec_view["database_name"].get_string().value; + const auto coll_name = test_spec_view["collection_name"].get_string().value; + const auto tests = test_spec_view["tests"].get_array().value; /* we may not have a supported topology */ - if (should_skip_spec_test(client{uri{}, test_util::add_test_server_api()}, test_spec_view)) { + if (should_skip_spec_test({uri{}, test_util::add_test_server_api()}, test_spec_view)) { WARN("File skipped - " + test_path); return; } for (auto&& test : tests) { - bool fail_point_enabled = (bool)test["failPoint"]; - auto description = test["description"].get_string().value; - INFO("Test description: " << description); - if (should_skip_spec_test(client{uri{}, test_util::add_test_server_api()}, - test.get_document().value)) { - continue; - } - - // Steps 1-6. - test_setup(test.get_document().value, test_spec_view); - - // Step 7. "Create a new MongoClient client, with Command Monitoring listeners enabled." - options::client client_opts; - apm_checker apm_checker; - client_opts.apm_opts(apm_checker.get_apm_opts(true /* command_started_events_only */)); - client_opts = test_util::add_test_server_api(client_opts); - client client; - if (test["useMultipleMongoses"]) { - client = {uri{"mongodb://localhost:27017,localhost:27018"}, client_opts}; - } else { - client = {get_uri(test.get_document().value), client_opts}; - } - - /* individual test may contain a skipReason */ - if (should_skip_spec_test(client, test.get_document())) { - continue; - } + const auto description = string::to_string(test["description"].get_string().value); - options::client_session session0_opts; - options::client_session session1_opts; + SECTION(description) { + client setup_client{get_uri(test.get_document().value), + test_util::add_test_server_api()}; - // Step 8: "Call client.startSession twice to create ClientSession objects" - if (test["sessionOptions"] && test["sessionOptions"]["session0"]) { - parse_session_opts(test["sessionOptions"]["session0"].get_document().value, - &session0_opts); - } - if (test["sessionOptions"] && test["sessionOptions"]["session1"]) { - parse_session_opts(test["sessionOptions"]["session1"].get_document().value, - &session1_opts); - } - - document::value session_lsid0{{}}; - document::value session_lsid1{{}}; - - // We wrap this section in its own scope as a way to control when the client_session - // objects created inside get destroyed. On destruction, client_sessions can send - // an abortTransaction that some of the spec tests look for. 
+ // Step 1: If the `skipReason` field is present, skip this test completely. + if (should_skip_spec_test(setup_client, test.get_document().value)) { + continue; + } - { - client_session session0 = client.start_session(session0_opts); - client_session session1 = client.start_session(session1_opts); - session_lsid0.reset(session0.id()); - session_lsid1.reset(session1.id()); + // Steps 2-8. + test_setup(test.get_document().value, test_spec_view); - // Step 9. Perform the operations. - apm_checker.clear(); + { + stdx::optional<fail_point_guard_type> fail_point_guard; + if (test["failPoint"]) { + const auto fail_point_name = string::to_string( + test["failPoint"]["configureFailPoint"].get_string().value); + fail_point_guard.emplace(setup_client, fail_point_name); + } - run_transaction_operations(test.get_document().value, - &client, - db_name, - coll_name, - &session0, - &session1, - &fail_point_enabled); - - // Step 10. "Call session0.endSession() and session1.endSession." (done in destructors). - } - - // Step 11. Compare APM events. - test_util::match_visitor visitor = - [&](bsoncxx::stdx::string_view key, - bsoncxx::stdx::optional<bsoncxx::types::bson_value::view> main, - bsoncxx::types::bson_value::view pattern) { - if (key.compare("lsid") == 0) { - REQUIRE(pattern.type() == type::k_string); - REQUIRE(main); - REQUIRE(main->type() == type::k_document); - auto session_name = pattern.get_string().value; - if (session_name.compare("session0") == 0) { - REQUIRE(test_util::matches(session_lsid0, main->get_document().value)); - } else { - REQUIRE(test_util::matches(session_lsid1, main->get_document().value)); + // Step 9: Create a new MongoClient `client`, with Command Monitoring listeners + // enabled. (Using a new MongoClient for each test ensures a fresh session pool that + // hasn't executed any transactions previously, so the tests can assert actual + // txnNumbers, starting from 1.) Pass this test's `clientOptions` if present + // (handled by `get_uri()`). + options::client client_opts; + apm_checker apm_checker; + client_opts.apm_opts( + apm_checker.get_apm_opts(true /* command_started_events_only */)); + client_opts = test_util::add_test_server_api(client_opts); + client client = {get_uri(test.get_document().value), client_opts}; + + options::client_session session0_opts; + options::client_session session1_opts; + + if (const auto session_opts = test["sessionOptions"]) { + if (session_opts["session0"]) { + parse_session_opts(test["sessionOptions"]["session0"].get_document().value, + &session0_opts); } - return test_util::match_action::k_skip; - } else if (pattern.type() == type::k_null) { - if (main) { - return test_util::match_action::k_not_equal; + if (session_opts["session1"]) { + parse_session_opts(test["sessionOptions"]["session1"].get_document().value, + &session1_opts); } - return test_util::match_action::k_skip; } - return test_util::match_action::k_skip; - }; + document::value session_lsid0{{}}; + document::value session_lsid1{{}}; + + { + // Step 10: Call client.startSession twice to create ClientSession objects + // `session0` and `session1`, using the test's "sessionOptions" if they are + // present. + client_session session0 = client.start_session(session0_opts); + client_session session1 = client.start_session(session1_opts); + + // Save their lsids so they are available after calling `endSession`.
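+                    // session.id() returns a view tied to the session's lifetime; copying it into an owning document::value keeps the lsid usable after both sessions are destroyed.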
+ session_lsid0.reset(session0.id()); + session_lsid1.reset(session1.id()); + + // The test runner MUST also ensure that the configureFailPoint command is + // excluded from the list of observed command monitoring events for this client + // (if applicable). + apm_checker.clear(); + apm_checker.set_ignore_command_monitoring_event("configureFailPoint"); + + // If a test uses `targetedFailPoint`, disable the fail point after running all + // `operations` to avoid spurious failures in subsequent tests. + stdx::optional<targeted_fail_point_guard_type> targeted_fail_point_guard; + + // Step 11. Perform the operations. + run_transaction_operations(test.get_document().value, + &client, + db_name, + coll_name, + &session0, + &session1, + &targeted_fail_point_guard, + apm_checker); + + // Step 12: Call session0.endSession() and session1.endSession (via + // client_session dtors). + } + + // Step 13. If the test includes a list of command-started events in expectations, + // compare them to the actual command-started events using the same logic as the + // legacy Command Monitoring Spec Tests runner, plus the rules in the + // Command-Started Events instructions. + test_util::match_visitor visitor = + [&](bsoncxx::stdx::string_view key, + bsoncxx::stdx::optional<bsoncxx::types::bson_value::view> main, + bsoncxx::types::bson_value::view pattern) { + if (key.compare("lsid") == 0) { + REQUIRE(pattern.type() == type::k_string); + REQUIRE(main); + REQUIRE(main->type() == type::k_document); + auto session_name = pattern.get_string().value; + if (session_name.compare("session0") == 0) { + REQUIRE( + test_util::matches(session_lsid0, main->get_document().value)); + } else { + REQUIRE( + test_util::matches(session_lsid1, main->get_document().value)); + } + return test_util::match_action::k_skip; + } else if (pattern.type() == type::k_null) { + if (main) { + return test_util::match_action::k_not_equal; + } + return test_util::match_action::k_skip; + } else if (key.compare("upsert") == 0 || key.compare("multi") == 0) { + // libmongoc includes `multi: false` and `upsert: false`. + // Some tests do not include `multi: false` and `upsert: false` + // in expectations. See DRIVERS-2271 and DRIVERS-976. + return test_util::match_action::k_skip; + } return test_util::match_action::k_skip; + }; - // Step 12. Disable the failpoint. - if (fail_point_enabled) { - disable_fail_point("mongodb://localhost:27017", client_opts); - if (test["useMultipleMongoses"]) { - disable_fail_point("mongodb://localhost:27018", client_opts); + if (test["expectations"]) { + apm_checker.compare(test["expectations"].get_array().value, false, visitor); + } + + // Step 14: If failPoint is specified, disable the fail point to avoid spurious + // failures in subsequent tests (fail_point_guard dtor). } - } - // Step 13. Compare the collection outcomes - if (test["outcome"] && test["outcome"]["collection"]) { - auto outcome_coll_name = coll_name; - if (test["outcome"]["collection"]["name"]) { - outcome_coll_name = test["outcome"]["collection"]["name"].get_string().value; + // Step 15: For each element in outcome: ...
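+            // The outcome read below deliberately uses a plain client with no APM listeners so it cannot disturb the command monitoring expectations checked in Step 13.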
+ if (test["outcome"] && test["outcome"]["collection"]) { + auto outcome_coll_name = coll_name; + if (test["outcome"]["collection"]["name"]) { + outcome_coll_name = test["outcome"]["collection"]["name"].get_string().value; + } + client client{get_uri(test.get_document().value), test_util::add_test_server_api()}; + auto coll = client[db_name][outcome_coll_name]; + test_util::check_outcome_collection( + &coll, test["outcome"]["collection"].get_document().value); } - auto coll = client[db_name][outcome_coll_name]; - test_util::check_outcome_collection(&coll, - test["outcome"]["collection"].get_document().value); } } } @@ -854,6 +1060,13 @@ void run_crud_tests_in_file(const std::string& test_path, uri test_uri) { configure_fail_point(client, test.get_document().value); + stdx::optional fail_point_guard; + if (test["failPoint"]) { + const auto fail_point_name = + string::to_string(test["failPoint"]["configureFailPoint"].get_string().value); + fail_point_guard.emplace(client, fail_point_name); + } + apm_checker.clear(); auto perform_op = [&database, &op_runner, &test, &outcome_collection_name](document::view operation) { @@ -919,11 +1132,6 @@ void run_crud_tests_in_file(const std::string& test_path, uri test_uri) { REQUIRE(test["expectations"].get_document().view().empty()); } } - - if (test["failPoint"]) { - disable_fail_point(client, - test["failPoint"]["configureFailPoint"].get_string().value); - } } } } diff --git a/src/mongocxx/test/transactions.cpp b/src/mongocxx/test/transactions.cpp index c9a73cc1fa..0e6acfeb57 100644 --- a/src/mongocxx/test/transactions.cpp +++ b/src/mongocxx/test/transactions.cpp @@ -13,6 +13,7 @@ // limitations under the License. #include +#include #include #include @@ -437,4 +438,70 @@ TEST_CASE("Transactions Documentation Examples", "[transactions]") { } } +TEST_CASE("Transactions Mongos Pinning Prose Tests", "[transactions]") { + instance::current(); + + if (test_util::compare_versions(test_util::get_server_version(), "4.1.6") < 0) { + WARN("Skipping - requires server 4.1.6+"); + return; + } + + if (test_util::get_topology() != "sharded") { + WARN("Skipping - requires sharded cluster topology"); + return; + } + + // @require_mongos_count_at_least(2) + { + mongocxx::client client{mongocxx::uri{"mongodb://localhost:27017"}}; + REQUIRE(client["config"].has_collection("shards")); + }; + { + mongocxx::client client{mongocxx::uri{"mongodb://localhost:27018"}}; + REQUIRE(client["config"].has_collection("shards")); + }; + + const auto uri = + mongocxx::uri("mongodb://localhost:27017,localhost:27018/?localThresholdMS=1000"); + + std::unordered_set ports; + + options::apm apm_opts; + apm_opts.on_command_started( + [&](const events::command_started_event& event) { ports.insert(event.port()); }); + options::client client_opts; + client_opts.apm_opts(apm_opts); + mongocxx::client client{uri, client_opts}; + + auto test = client["test"]["test"]; + test.insert_one(make_document()); + + auto s = client.start_session(); + s.start_transaction(); + test.insert_one(s, make_document()); + s.commit_transaction(); + + // Prose Test 1 + SECTION("Unpin for next transaction") { + for (int i = 0; i < 50; ++i) { + s.start_transaction(); + auto cursor = test.find(s, {}); + REQUIRE(cursor.begin() != cursor.end()); + s.commit_transaction(); + } + + REQUIRE(ports.size() > 1u); + } + + // Prose Test 2 + SECTION("Unpin for non-transaction operation") { + for (int i = 0; i < 50; ++i) { + auto cursor = test.find(s, {}); + REQUIRE(cursor.begin() != cursor.end()); + } + + REQUIRE(ports.size() > 1u); + 
} +} + } // namespace diff --git a/src/mongocxx/test_util/client_helpers.cpp b/src/mongocxx/test_util/client_helpers.cpp index c7673cdc98..5ede97d2a6 100644 --- a/src/mongocxx/test_util/client_helpers.cpp +++ b/src/mongocxx/test_util/client_helpers.cpp @@ -243,14 +243,16 @@ std::string replica_set_name(const client& client) { return ""; } +static bool is_replica_set(document::view reply) { + return static_cast(reply["setName"]); +} + bool is_replica_set(const client& client) { - auto reply = get_is_master(client); - return static_cast(reply.view()["setName"]); + return is_replica_set(get_is_master(client)); } -bool is_sharded_cluster(const client& client) { - const auto reply = get_is_master(client); - const auto msg = reply.view()["msg"]; +static bool is_sharded_cluster(document::view reply) { + const auto msg = reply["msg"]; if (!msg) { return false; @@ -259,6 +261,10 @@ bool is_sharded_cluster(const client& client) { return msg.get_string().value.compare("isdbgrid") == 0; } +bool is_sharded_cluster(const client& client) { + return is_sharded_cluster(get_is_master(client)); +} + std::string get_hosts(const client& client) { auto shards = get_shards(client); if (shards) @@ -267,18 +273,13 @@ std::string get_hosts(const client& client) { } std::string get_topology(const client& client) { - if (is_replica_set(client)) + const auto reply = get_is_master(client); + + if (is_replica_set(reply)) { return "replicaset"; + } - // from: https://www.mongodb.com/docs/manual/reference/config-database/#config.shards - // If the shard is a replica set, the host field displays the name of the replica set, then a - // slash, then a comma-separated list of the hostnames of each member of the replica set, as in - // the following example: - // { ... , "host" : "shard0001/localhost:27018,localhost:27019,localhost:27020", ... } - auto host = get_hosts(client); - if (!host.empty()) { - if (std::find(std::begin(host), std::end(host), '/') != std::end(host)) - return "sharded-replicaset"; + if (is_sharded_cluster(reply)) { return "sharded"; } @@ -442,17 +443,43 @@ std::string tolowercase(stdx::string_view view) { } void check_outcome_collection(mongocxx::collection* coll, bsoncxx::document::view expected) { + REQUIRE(coll); + + read_preference rp; + rp.mode(read_preference::read_mode::k_primary); + read_concern rc; rc.acknowledge_level(read_concern::level::k_local); - auto old_rc = coll->read_concern(); - coll->read_concern(rc); - options::find options{}; - options.sort(make_document(kvp("_id", 1))); + struct coll_state_guard_type { + mongocxx::collection& coll; + read_preference old_rp; + read_concern old_rc; + + coll_state_guard_type(mongocxx::collection& coll) : coll(coll) { + old_rp = coll.read_preference(); + old_rc = coll.read_concern(); + } + + ~coll_state_guard_type() { + try { + coll.read_preference(old_rp); + coll.read_concern(old_rc); + } catch (...) { + } + } + } coll_state_guard(*coll); + + // Ensure this find reads the latest data by using primary read preference with local read + // concern even when the MongoClient is configured with another read preference or read concern. 
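+    // coll_state_guard (above) restores the caller's read preference and read concern when this function returns, even on assertion failure.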
+ coll->read_preference(rp); + coll->read_concern(rc); using namespace std; - cursor actual = coll->find({}, options); - auto expected_data = expected["data"].get_array().value; + + const auto expected_data = expected["data"].get_array().value; + auto actual = coll->find({}, options::find().sort(make_document(kvp("_id", 1)))); + REQUIRE(equal(begin(expected_data), end(expected_data), begin(actual), @@ -461,7 +488,6 @@ void check_outcome_collection(mongocxx::collection* coll, bsoncxx::document::vie return true; })); REQUIRE(begin(actual) == end(actual)); - coll->read_concern(old_rc); } bool server_has_sessions(const client& conn) {