From a73e13b6b90a22bc5ebf67ea32573d5af6f0d87d Mon Sep 17 00:00:00 2001 From: filipcirtog Date: Mon, 17 Nov 2025 10:39:16 +0100 Subject: [PATCH 01/13] library + mongodbmulticluster implementation --- .evergreen-tasks.yml | 74 +-- .evergreen.yml | 72 +-- .../mongodbmulticluster/__init__.py | 0 .../fixtures/mongodb-user.yaml | 0 .../fixtures/mongodb-x509-user.yaml | 0 ...odbmulticluster-central-sts-override.yaml} | 0 .../mongodbmulticluster-multi-cluster.yaml} | 0 .../mongodbmulticluster-multi-dr.yaml} | 0 ...mongodbmulticluster-multi-pvc-resize.yaml} | 0 ...godbmulticluster-multi-split-horizon.yaml} | 0 ...ngodbmulticluster-multi-sts-override.yaml} | 0 .../fixtures/mongodbmulticluster-multi.yaml} | 0 ...multicluster-split-horizon-node-port.yaml} | 0 .../mongodbmulticluster-multi-m2m-group.yaml} | 0 .../mongodbmulticluster-multi-m2m-user.yaml} | 0 .../fixtures/oidc/oidc-user-multi.yaml | 0 ...multicluster-split-horizon-node-port.yaml} | 0 ...cluster_tls_no_mesh_2_clusters_eks_gke.py} | 17 +- ...multi_2_cluster_clusterwide_replicaset.py} | 100 +--- ...ulticluster_multi_2_cluster_replicaset.py} | 39 +- ...bmulticluster_multi_cluster_agent_flags.py | 54 ++ ...lti_cluster_automated_disaster_recovery.py | 104 ++++ ...r_multi_cluster_backup_restore_no_mesh.py} | 223 ++------ ...multicluster_multi_cluster_cli_recover.py} | 65 +-- ...multicluster_multi_cluster_clusterwide.py} | 105 +--- ...dbmulticluster_multi_cluster_dr_connect.py | 69 +++ ...bmulticluster_multi_cluster_enable_tls.py} | 40 +- ...mongodbmulticluster_multi_cluster_ldap.py} | 165 ++---- ...luster_multi_cluster_ldap_custom_roles.py} | 80 +-- ...ticluster_multi_cluster_oidc_m2m_group.py} | 28 +- ...lticluster_multi_cluster_oidc_m2m_user.py} | 28 +- ...dbmulticluster_multi_cluster_pvc_resize.py | 55 ++ ...ticluster_multi_cluster_reconcile_races.py | 109 ++++ ...uster_multi_cluster_recover_clusterwide.py | 235 ++++++++ ...multi_cluster_recover_network_partition.py | 92 ++++ ...bmulticluster_multi_cluster_replica_set.py | 138 +++++ ...ster_multi_cluster_replica_set_deletion.py | 63 +++ ...uster_replica_set_ignore_unknown_users.py} | 32 +- ...ulti_cluster_replica_set_member_options.py | 125 +++++ ...er_multi_cluster_replica_set_migration.py} | 44 +- ...er_multi_cluster_replica_set_scale_down.py | 112 ++++ ...ster_multi_cluster_replica_set_scale_up.py | 115 ++++ ...ter_multi_cluster_replica_set_test_mtls.py | 86 +++ ...luster_multi_cluster_scale_down_cluster.py | 106 ++++ ...cluster_multi_cluster_scale_up_cluster.py} | 81 +-- ...ti_cluster_scale_up_cluster_new_cluster.py | 130 +++++ ...mongodbmulticluster_multi_cluster_scram.py | 144 +++++ ...lticluster_multi_cluster_split_horizon.py} | 54 +- ...multicluster_multi_cluster_sts_override.py | 59 ++ ...bmulticluster_multi_cluster_tls_no_mesh.py | 198 +++++++ ...lticluster_multi_cluster_tls_with_scram.py | 175 ++++++ ...lticluster_multi_cluster_tls_with_x509.py} | 54 +- ...luster_multi_cluster_upgrade_downgrade.py} | 56 +- ...dbmulticluster_multi_cluster_validation.py | 25 + .../multi_cluster_backup_restore.py | 516 ------------------ .../tests/multicluster/shared/__init__.py | 0 ..._cluster_tls_no_mesh_2_clusters_eks_gke.py | 22 + .../multi_2_cluster_clusterwide_replicaset.py | 121 ++++ .../shared/multi_2_cluster_replicaset.py | 43 ++ .../{ => shared}/multi_cluster_agent_flags.py | 31 +- ...lti_cluster_automated_disaster_recovery.py | 59 +- .../shared/multi_cluster_backup_restore.py | 214 ++++++++ .../multi_cluster_backup_restore_no_mesh.py | 258 +++++++++ .../shared/multi_cluster_cli_recover.py 
| 80 +++ .../shared/multi_cluster_clusterwide.py | 123 +++++ .../{ => shared}/multi_cluster_dr_connect.py | 36 +- .../shared/multi_cluster_enable_tls.py | 46 ++ .../multicluster/shared/multi_cluster_ldap.py | 163 ++++++ .../shared/multi_cluster_ldap_custom_roles.py | 80 +++ .../shared/multi_cluster_oidc_m2m_group.py | 24 + .../shared/multi_cluster_oidc_m2m_user.py | 29 + .../{ => shared}/multi_cluster_pvc_resize.py | 35 +- .../multi_cluster_reconcile_races.py | 80 +-- .../multi_cluster_recover_clusterwide.py | 129 +---- ...multi_cluster_recover_network_partition.py | 47 +- .../{ => shared}/multi_cluster_replica_set.py | 80 +-- .../multi_cluster_replica_set_deletion.py | 37 +- ...luster_replica_set_ignore_unknown_users.py | 27 + ...ulti_cluster_replica_set_member_options.py | 100 +--- .../multi_cluster_replica_set_migration.py | 52 ++ .../multi_cluster_replica_set_scale_down.py | 78 +-- .../multi_cluster_replica_set_scale_up.py | 77 +-- .../multi_cluster_replica_set_test_mtls.py | 36 +- .../multi_cluster_scale_down_cluster.py | 70 +-- .../shared/multi_cluster_scale_up_cluster.py | 96 ++++ ...ti_cluster_scale_up_cluster_new_cluster.py | 76 +-- .../{ => shared}/multi_cluster_scram.py | 73 +-- .../shared/multi_cluster_split_horizon.py | 53 ++ .../multi_cluster_sts_override.py | 32 +- .../{ => shared}/multi_cluster_tls_no_mesh.py | 155 +----- .../multi_cluster_tls_with_scram.py | 107 +--- .../shared/multi_cluster_tls_with_x509.py | 63 +++ .../shared/multi_cluster_upgrade_downgrade.py | 59 ++ .../{ => shared}/multi_cluster_validation.py | 28 +- 94 files changed, 4315 insertions(+), 2671 deletions(-) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/__init__.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => mongodbmulticluster}/fixtures/mongodb-user.yaml (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => mongodbmulticluster}/fixtures/mongodb-x509-user.yaml (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{fixtures/mongodb-multi-central-sts-override.yaml => mongodbmulticluster/fixtures/mongodbmulticluster-central-sts-override.yaml} (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{fixtures/mongodb-multi-cluster.yaml => mongodbmulticluster/fixtures/mongodbmulticluster-multi-cluster.yaml} (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{fixtures/mongodb-multi-dr.yaml => mongodbmulticluster/fixtures/mongodbmulticluster-multi-dr.yaml} (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{fixtures/mongodb-multi-pvc-resize.yaml => mongodbmulticluster/fixtures/mongodbmulticluster-multi-pvc-resize.yaml} (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{fixtures/mongodb-multi-split-horizon.yaml => mongodbmulticluster/fixtures/mongodbmulticluster-multi-split-horizon.yaml} (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{fixtures/mongodb-multi-sts-override.yaml => mongodbmulticluster/fixtures/mongodbmulticluster-multi-sts-override.yaml} (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{fixtures/mongodb-multi.yaml => mongodbmulticluster/fixtures/mongodbmulticluster-multi.yaml} (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{fixtures/split-horizon-node-port.yaml => mongodbmulticluster/fixtures/mongodbmulticluster-split-horizon-node-port.yaml} (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{fixtures/oidc/mongodb-multi-m2m-group.yaml => 
mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-group.yaml} (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{fixtures/oidc/mongodb-multi-m2m-user.yaml => mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-user.yaml} (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => mongodbmulticluster}/fixtures/oidc/oidc-user-multi.yaml (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{fixtures/split-horizon-node-ports/split-horizon-node-port.yaml => mongodbmulticluster/fixtures/split-horizon-node-ports/mongodbmulticluster-split-horizon-node-port.yaml} (100%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py => mongodbmulticluster/mongodbmulticluster_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py} (91%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_2_cluster_clusterwide_replicaset.py => mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py} (66%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_2_cluster_replicaset.py => mongodbmulticluster/mongodbmulticluster_multi_2_cluster_replicaset.py} (65%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_agent_flags.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_automated_disaster_recovery.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_backup_restore_no_mesh.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py} (67%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_cli_recover.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_cli_recover.py} (56%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_clusterwide.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_clusterwide.py} (60%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_dr_connect.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_enable_tls.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_enable_tls.py} (64%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_ldap.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py} (57%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_ldap_custom_roles.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap_custom_roles.py} (71%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_oidc_m2m_group.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py} (57%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_oidc_m2m_user.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py} (65%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_pvc_resize.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py create mode 100644 
docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_network_partition.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_deletion.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_replica_set_ignore_unknown_users.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users.py} (56%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_member_options.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_replica_set_migration.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_migration.py} (53%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_down.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_test_mtls.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_scale_up_cluster.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py} (60%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scram.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_split_horizon.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_split_horizon.py} (71%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_sts_override.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_no_mesh.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_tls_with_x509.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py} (69%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{multi_cluster_upgrade_downgrade.py => mongodbmulticluster/mongodbmulticluster_multi_cluster_upgrade_downgrade.py} (52%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py delete mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/__init__.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_clusterwide_replicaset.py 
create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_replicaset.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_agent_flags.py (71%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_automated_disaster_recovery.py (66%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_clusterwide.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_dr_connect.py (55%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_enable_tls.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap_custom_roles.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_pvc_resize.py (57%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_reconcile_races.py (75%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_recover_clusterwide.py (66%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_recover_network_partition.py (71%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_replica_set.py (73%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_replica_set_deletion.py (69%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_ignore_unknown_users.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_replica_set_member_options.py (63%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_migration.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_replica_set_scale_down.py (50%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_replica_set_scale_up.py (63%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_replica_set_test_mtls.py (84%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_scale_down_cluster.py (54%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_scale_up_cluster_new_cluster.py (58%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_scram.py (69%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_split_horizon.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_sts_override.py (61%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_tls_no_mesh.py 
(52%) rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_tls_with_scram.py (55%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_x509.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_upgrade_downgrade.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{ => shared}/multi_cluster_validation.py (53%) diff --git a/.evergreen-tasks.yml b/.evergreen-tasks.yml index 38353607d..3a8a4825d 100644 --- a/.evergreen-tasks.yml +++ b/.evergreen-tasks.yml @@ -922,77 +922,77 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set + - name: e2e_mongodbmulticluster_multi_cluster_replica_set tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_migration + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_migration tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_member_options + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_member_options tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_scale_up + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_scale_up_cluster + - name: e2e_mongodbmulticluster_multi_cluster_scale_up_cluster tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_scale_up_cluster_new_cluster + - name: e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_scale_down_cluster + - name: e2e_mongodbmulticluster_multi_cluster_scale_down_cluster tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_scale_down + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_deletion + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_deletion tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_mtls_test + - name: e2e_mongodbmulticluster_multi_cluster_mtls_test tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_scram + - name: e2e_mongodbmulticluster_multi_cluster_scram tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_sts_override + - name: e2e_mongodbmulticluster_multi_sts_override tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_tls_with_scram + - name: e2e_mongodbmulticluster_multi_cluster_tls_with_scram tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_enable_tls + - name: e2e_mongodbmulticluster_multi_cluster_enable_tls tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_upgrade_downgrade + - name: e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade tags: [ "patch-run" ] commands: - func: e2e_test @@ -1003,12 +1003,12 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_tls_no_mesh + - name: e2e_mongodbmulticluster_multi_cluster_tls_no_mesh tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_backup_restore + - name: e2e_mongodbmulticluster_multi_cluster_backup_restore tags: [ "patch-run" ] commands: - func: e2e_test @@ -1018,7 +1018,7 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_backup_restore_no_mesh + - name: 
e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh tags: [ "patch-run" ] commands: - func: e2e_test @@ -1043,78 +1043,78 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_tls_with_x509 + - name: e2e_mongodbmulticluster_multi_cluster_tls_with_x509 tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_with_ldap + - name: e2e_mongodbmulticluster_multi_cluster_with_ldap tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_with_ldap_custom_roles + - name: e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_specific_namespaces + - name: e2e_mongodbmulticluster_multi_cluster_specific_namespaces tags: [ "patch-run" ] commands: - func: e2e_test # TODO: not used in any variant - - name: e2e_multi_cluster_clusterwide + - name: e2e_mongodbmulticluster_multi_cluster_clusterwide tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_disaster_recovery + - name: e2e_mongodbmulticluster_multi_cluster_disaster_recovery tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_multi_disaster_recovery + - name: e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_2_clusters_replica_set + - name: e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_2_clusters_clusterwide + - name: e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_recover + - name: e2e_mongodbmulticluster_multi_cluster_recover tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_recover_network_partition + - name: e2e_mongodbmulticluster_multi_cluster_recover_network_partition tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_recover_clusterwide + - name: e2e_mongodbmulticluster_multi_cluster_recover_clusterwide tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_agent_flags + - name: e2e_mongodbmulticluster_multi_cluster_agent_flags tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_ignore_unknown_users + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_validation + - name: e2e_mongodbmulticluster_multi_cluster_validation tags: [ "patch-run" ] commands: - func: e2e_test @@ -1179,13 +1179,13 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_pvc_resize + - name: e2e_mongodbmulticluster_multi_cluster_pvc_resize tags: [ "patch-run" ] commands: - func: e2e_test # this test is run, with an operator with race enabled - - name: e2e_om_reconcile_race_with_telemetry + - name: e2e_mongodbmulticluster_om_reconcile_race_with_telemetry tags: [ "patch-run" ] commands: - func: e2e_test @@ -1268,12 +1268,12 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_oidc_m2m_group + - name: e2e_mongodbmulticluster_multi_cluster_oidc_m2m_group tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_oidc_m2m_user + - name: e2e_mongodbmulticluster_multi_cluster_oidc_m2m_user tags: [ "patch-run" ] commands: - func: e2e_test diff --git a/.evergreen.yml b/.evergreen.yml index 79b181e3e..2aca80943 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -830,7 
+830,7 @@ task_groups: <<: *setup_group_multi_cluster <<: *setup_and_teardown_task tasks: - - e2e_om_reconcile_race_with_telemetry + - e2e_mongodbmulticluster_om_reconcile_race_with_telemetry <<: *teardown_group # e2e_operator_task_group includes the tests for the specific Operator configuration/behavior. They may deal with @@ -856,37 +856,37 @@ task_groups: <<: *setup_group <<: *setup_and_teardown_task_cloudqa tasks: - - e2e_multi_cluster_replica_set - - e2e_multi_cluster_replica_set_migration - - e2e_multi_cluster_replica_set_member_options - - e2e_multi_cluster_recover - - e2e_multi_cluster_recover_clusterwide - - e2e_multi_cluster_specific_namespaces - - e2e_multi_cluster_scram - - e2e_multi_cluster_tls_with_x509 - - e2e_multi_cluster_tls_no_mesh - - e2e_multi_cluster_enable_tls - # e2e_multi_cluster_with_ldap - # e2e_multi_cluster_with_ldap_custom_roles - - e2e_multi_cluster_mtls_test - - e2e_multi_cluster_replica_set_deletion - - e2e_multi_cluster_replica_set_scale_up - - e2e_multi_cluster_scale_up_cluster - - e2e_multi_cluster_scale_up_cluster_new_cluster - - e2e_multi_cluster_replica_set_scale_down - - e2e_multi_cluster_scale_down_cluster - - e2e_multi_sts_override - - e2e_multi_cluster_tls_with_scram - - e2e_multi_cluster_upgrade_downgrade - - e2e_multi_cluster_backup_restore - - e2e_multi_cluster_backup_restore_no_mesh - - e2e_multi_cluster_disaster_recovery - - e2e_multi_cluster_multi_disaster_recovery - - e2e_multi_cluster_recover_network_partition - - e2e_multi_cluster_validation - - e2e_multi_cluster_agent_flags - - e2e_multi_cluster_replica_set_ignore_unknown_users - - e2e_multi_cluster_pvc_resize + - e2e_mongodbmulticluster_multi_cluster_replica_set + - e2e_mongodbmulticluster_multi_cluster_replica_set_migration + - e2e_mongodbmulticluster_multi_cluster_replica_set_member_options + - e2e_mongodbmulticluster_multi_cluster_recover + - e2e_mongodbmulticluster_multi_cluster_recover_clusterwide + - e2e_mongodbmulticluster_multi_cluster_specific_namespaces + - e2e_mongodbmulticluster_multi_cluster_scram + - e2e_mongodbmulticluster_multi_cluster_tls_with_x509 + - e2e_mongodbmulticluster_multi_cluster_tls_no_mesh + - e2e_mongodbmulticluster_multi_cluster_enable_tls + # e2e_mongodbmulticluster_multi_cluster_with_ldap + # e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles + - e2e_mongodbmulticluster_multi_cluster_mtls_test + - e2e_mongodbmulticluster_multi_cluster_replica_set_deletion + - e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up + - e2e_mongodbmulticluster_multi_cluster_scale_up_cluster + - e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster + - e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down + - e2e_mongodbmulticluster_multi_cluster_scale_down_cluster + - e2e_mongodbmulticluster_multi_sts_override + - e2e_mongodbmulticluster_multi_cluster_tls_with_scram + - e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade + - e2e_mongodbmulticluster_multi_cluster_backup_restore + - e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh + - e2e_mongodbmulticluster_multi_cluster_disaster_recovery + - e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery + - e2e_mongodbmulticluster_multi_cluster_recover_network_partition + - e2e_mongodbmulticluster_multi_cluster_validation + - e2e_mongodbmulticluster_multi_cluster_agent_flags + - e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users + - e2e_mongodbmulticluster_multi_cluster_pvc_resize - e2e_multi_cluster_sharded_geo_sharding - e2e_multi_cluster_sharded_scaling 
- e2e_multi_cluster_sharded_scaling_all_shard_overrides @@ -920,8 +920,8 @@ task_groups: - e2e_mongodb_custom_roles - e2e_sharded_cluster_oidc_m2m_group - e2e_sharded_cluster_oidc_m2m_user - - e2e_multi_cluster_oidc_m2m_group - - e2e_multi_cluster_oidc_m2m_user + - e2e_mongodbmulticluster_multi_cluster_oidc_m2m_group + - e2e_mongodbmulticluster_multi_cluster_oidc_m2m_user <<: *teardown_group @@ -930,8 +930,8 @@ task_groups: <<: *setup_group <<: *setup_and_teardown_task_cloudqa tasks: - - e2e_multi_cluster_2_clusters_replica_set - - e2e_multi_cluster_2_clusters_clusterwide + - e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set + - e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide <<: *teardown_group - name: e2e_multi_cluster_om_appdb_task_group diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/__init__.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-user.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-user.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-user.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-user.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-x509-user.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-x509-user.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-x509-user.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-x509-user.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-central-sts-override.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-central-sts-override.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-central-sts-override.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-central-sts-override.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-cluster.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-cluster.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-cluster.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-cluster.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-dr.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-dr.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-dr.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-dr.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-pvc-resize.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-pvc-resize.yaml similarity index 100% rename from 
docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-pvc-resize.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-pvc-resize.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-split-horizon.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-split-horizon.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-split-horizon.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-split-horizon.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-sts-override.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-sts-override.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-sts-override.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-sts-override.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-port.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-split-horizon-node-port.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-port.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-split-horizon-node-port.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-group.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-group.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-group.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-group.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-user.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-user.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-user.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-user.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/oidc-user-multi.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/oidc-user-multi.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/oidc-user-multi.yaml rename to 
docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/oidc-user-multi.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-ports/split-horizon-node-port.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/split-horizon-node-ports/mongodbmulticluster-split-horizon-node-port.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-ports/split-horizon-node-port.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/split-horizon-node-ports/mongodbmulticluster-split-horizon-node-port.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py similarity index 91% rename from docker/mongodb-kubernetes-tests/tests/multicluster/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py index 0cb42037d..111d90c1e 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py @@ -19,14 +19,14 @@ from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture from tests.multicluster.conftest import cluster_spec_list +from ..shared import manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-cluster-rs" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem" @fixture(scope="module") @@ -39,7 +39,7 @@ def cert_additional_domains() -> list[str]: @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str, member_cluster_names: List[str]) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) resource["spec"]["persistent"] = False # These domains map 1:1 to the CoreDNS file. Please be mindful when updating them. 
resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) @@ -130,7 +130,7 @@ def server_certs( def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) def test_create_mongodb_multi( @@ -141,4 +141,11 @@ def test_create_mongodb_multi( member_cluster_clients: List[MultiClusterClient], member_cluster_names: List[str], ): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2400) + testhelper.test_create_mongodb_multi( + mongodb_multi, + namespace, + server_certs, + multi_cluster_issuer_ca_configmap, + member_cluster_clients, + member_cluster_names, + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_clusterwide_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_clusterwide_replicaset.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py index 277f3fc9f..0bfcc4724 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_clusterwide_replicaset.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py @@ -4,9 +4,6 @@ import pytest from kubetester import ( create_or_update_configmap, - create_or_update_secret, - read_configmap, - read_secret, ) from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs from kubetester.kubetester import ensure_ent_version @@ -14,14 +11,12 @@ from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase +from tests.multicluster.conftest import cluster_spec_list -from . 
import prepare_multi_cluster_namespaces -from .conftest import cluster_spec_list, create_namespace +from ..shared import multi_2_cluster_clusterwide_replicaset as testhelper CERT_SECRET_PREFIX = "clustercert" -MDB_RESOURCE = "multi-cluster-replica-set" -BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" +MDB_RESOURCE = "multi-replica-set" @pytest.fixture(scope="module") @@ -41,7 +36,7 @@ def mongodb_multi_a_unmarshalled( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdba_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdba_ns) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) resource.set_version(ensure_ent_version(custom_mdb_version)) @@ -58,7 +53,7 @@ def mongodb_multi_b_unmarshalled( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdbb_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdbb_ns) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) resource.set_version(ensure_ent_version(custom_mdb_version)) @@ -161,16 +156,12 @@ def mongodb_multi_b( return resource -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]): - clients = cluster_clients + testhelper.test_create_kube_config_file(cluster_clients, member_cluster_names) - assert len(clients) == 2 - assert member_cluster_names[0] in clients - assert member_cluster_names[1] in clients -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_create_namespaces( namespace: str, mdba_ns: str, @@ -180,34 +171,23 @@ mdbb_ns: str, central_cluster_client: kubernetes.client.ApiClient, member_cluster_clients: List[MultiClusterClient], evergreen_task_id: str, multi_cluster_operator_installation_config: Dict[str, str], ): - image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"] - image_pull_secret_data = read_secret(namespace, image_pull_secret_name, api_client=central_cluster_client) - - create_namespace( - central_cluster_client, - member_cluster_clients, - evergreen_task_id, + testhelper.test_create_namespaces( + namespace, mdba_ns, - image_pull_secret_name, - image_pull_secret_data, - ) - - create_namespace( + mdbb_ns, central_cluster_client, member_cluster_clients, evergreen_task_id, - mdbb_ns, - image_pull_secret_name, - image_pull_secret_data, + multi_cluster_operator_installation_config, ) -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_deploy_operator(multi_cluster_operator_clustermode: Operator): - multi_cluster_operator_clustermode.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator_clustermode) -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_prepare_namespace( multi_cluster_operator_installation_config: Dict[str, str], member_cluster_clients: List[MultiClusterClient], @@ -215,24 +195,12 @@ central_cluster_name: str, mdba_ns: str, mdbb_ns: str, ): - prepare_multi_cluster_namespaces( - mdba_ns,
- multi_cluster_operator_installation_config, - member_cluster_clients, - central_cluster_name, - skip_central_cluster=False, - ) - - prepare_multi_cluster_namespaces( - mdbb_ns, - multi_cluster_operator_installation_config, - member_cluster_clients, - central_cluster_name, - skip_central_cluster=False, + testhelper.test_prepare_namespace( + multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns ) -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_copy_configmap_and_secret_across_ns( namespace: str, central_cluster_client: kubernetes.client.ApiClient, @@ -240,35 +208,21 @@ mdba_ns: str, mdbb_ns: str, ): - data = read_configmap(namespace, "my-project", api_client=central_cluster_client) - data["projectName"] = mdba_ns - create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client) - - data["projectName"] = mdbb_ns - create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client) - - data = read_secret(namespace, "my-credentials", api_client=central_cluster_client) - create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client) - create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client) + testhelper.test_copy_configmap_and_secret_across_ns( + namespace, central_cluster_client, multi_cluster_operator_installation_config, mdba_ns, mdbb_ns ) -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDBMulti): - mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_mongodb_multi_nsa(mongodb_multi_a) -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_enable_mongodb_multi_nsa_auth(mongodb_multi_a: MongoDBMulti): - mongodb_multi_a.reload() - mongodb_multi_a["spec"]["authentication"] = ( - { - "agents": {"mode": "SCRAM"}, - "enabled": True, - "modes": ["SCRAM"], - }, - ) + testhelper.test_enable_mongodb_multi_nsa_auth(mongodb_multi_a) -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti): - mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_mongodb_multi_nsb(mongodb_multi_b) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_replicaset.py similarity index 65% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_replicaset.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_replicaset.py index fd3d273aa..3198d8cce 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_replicaset.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_replicaset.py @@ -7,12 +7,11 @@ from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_local from kubetester.mongodb_multi import MongoDBMulti -from kubetester.mongotester import
with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase +from tests.multicluster.conftest import cluster_spec_list -from .conftest import cluster_spec_list +from ..shared import multi_2_cluster_replicaset as testhelper CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-cluster-replica-set" @@ -23,7 +22,7 @@ def mongodb_multi_unmarshalled( namespace: str, member_cluster_names: List[str], custom_mdb_version: str ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) resource.set_version(ensure_ent_version(custom_mdb_version)) return resource @@ -66,42 +65,30 @@ def mongodb_multi( return resource.create() -@pytest.mark.e2e_multi_cluster_2_clusters_replica_set +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]): - clients = cluster_clients + testhelper.test_create_kube_config_file(cluster_clients, member_cluster_names) - assert len(clients) == 2 - assert member_cluster_names[0] in clients - assert member_cluster_names[1] in clients - -@pytest.mark.e2e_multi_cluster_2_clusters_replica_set +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@pytest.mark.e2e_multi_cluster_2_clusters_replica_set +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_create_mongodb_multi(mongodb_multi) -@pytest.mark.e2e_multi_cluster_2_clusters_replica_set +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set def test_statefulset_is_created_across_multiple_clusters( mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient], ): - statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) - cluster_one_client = member_cluster_clients[0] - cluster_one_sts = statefulsets[cluster_one_client.cluster_name] - assert cluster_one_sts.status.ready_replicas == 2 - - cluster_two_client = member_cluster_clients[1] - cluster_two_sts = statefulsets[cluster_two_client.cluster_name] - assert cluster_two_sts.status.ready_replicas == 1 + testhelper.test_statefulset_is_created_across_multiple_clusters(mongodb_multi, member_cluster_clients) @skip_if_local -@pytest.mark.e2e_multi_cluster_2_clusters_replica_set +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_agent_flags.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_agent_flags.py new file mode 100644 index 000000000..b267cc19f --- /dev/null +++ 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_agent_flags.py @@ -0,0 +1,54 @@ +from typing import List + +import kubernetes +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_agent_flags as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi-cluster.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + # override agent startup flags + resource["spec"]["agent"] = {"startupOptions": {"logFile": "/var/log/mongodb-mms-automation/customLogFile"}} + resource["spec"]["agent"]["logLevel"] = "DEBUG" + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.update() + + +@mark.e2e_mongodbmulticluster_multi_cluster_agent_flags +def test_create_mongodb_multi(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(multi_cluster_operator, mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_agent_flags +def test_multi_replicaset_has_agent_flags( + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_multi_replicaset_has_agent_flags(namespace, member_cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_agent_flags +def test_placeholders_in_external_services( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_placeholders_in_external_services(namespace, mongodb_multi, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_automated_disaster_recovery.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_automated_disaster_recovery.py new file mode 100644 index 000000000..207495ce9 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_automated_disaster_recovery.py @@ -0,0 +1,104 @@ +from typing import List + +import kubernetes +from kubeobject import CustomObject +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import ( + cluster_spec_list, +) + +from ..shared import multi_cluster_automated_disaster_recovery as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + 
resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_label_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_label_namespace(namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_mongodb_multi_leaves_running_state( + mongodb_multi: MongoDBMulti, +): + testhelper.test_mongodb_multi_leaves_running_state(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_delete_database_statefulset_in_failed_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: list[str]): + testhelper.test_delete_database_statefulset_in_failed_cluster(mongodb_multi, member_cluster_names) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_replica_reaches_running(mongodb_multi: MongoDBMulti): + testhelper.test_replica_reaches_running(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_number_numbers_in_ac(mongodb_multi: MongoDBMulti): + testhelper.test_number_numbers_in_ac(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_sts_count_in_member_cluster( + mongodb_multi: MongoDBMulti, + member_cluster_names: list[str], + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_sts_count_in_member_cluster(mongodb_multi, member_cluster_names, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py similarity index 67% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore_no_mesh.py rename to 
docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py index 41707aa04..388288eb8 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py @@ -1,9 +1,7 @@ # This test sets up ops manager in a multicluster "no-mesh" environment. # It tests the back-up functionality with a multi-cluster replica-set when the replica-set is deployed outside of a service-mesh context. -import datetime -import time -from typing import List, Optional, Tuple +from typing import List, Tuple import kubernetes import kubernetes.client @@ -12,8 +10,6 @@ from kubetester import ( create_or_update_configmap, create_or_update_secret, - get_default_storage_class, - read_service, try_load, ) from kubetester.certs import create_ops_manager_tls_certs @@ -27,14 +23,11 @@ from kubetester.omtester import OMTester from kubetester.operator import Operator from kubetester.opsmanager import MongoDBOpsManager -from kubetester.phase import Phase from pytest import fixture, mark -from tests.conftest import assert_data_got_restored, update_coredns_hosts -TEST_DATA = {"_id": "unique_id", "name": "John", "address": "Highway 37", "age": 30} +from ..shared import multi_cluster_backup_restore_no_mesh as testhelper MONGODB_PORT = 30000 -HEAD_PATH = "/head/" OPLOG_RS_NAME = "my-mongodb-oplog" BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" USER_PASSWORD = "/qwerty@!#:" @@ -63,33 +56,6 @@ def ops_manager_certs( ) -def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): - name = f"{mdb_name}-config" - data = { - "baseUrl": om.om_status().get_url(), - "projectName": project_name, - "sslMMSCAConfigMap": custom_ca, - "orgId": "", - } - - create_or_update_configmap(om.namespace, name, data, client) - - -def new_om_data_store( - mdb: MongoDB, - id: str, - assignment_enabled: bool = True, - user_name: Optional[str] = None, - password: Optional[str] = None, -) -> dict: - return { - "id": id, - "uri": mdb.mongo_uri(user_name=user_name, password=password), - "ssl": mdb.is_tls_enabled(), - "assignmentEnabled": assignment_enabled, - } - - @fixture(scope="module") def ops_manager( namespace: str, @@ -135,7 +101,7 @@ def oplog_replica_set( name=OPLOG_RS_NAME, ) - create_project_config_map( + testhelper.create_project_config_map( om=ops_manager, project_name="development", mdb_name=OPLOG_RS_NAME, @@ -168,7 +134,7 @@ def blockstore_replica_set( name=BLOCKSTORE_RS_NAME, ) - create_project_config_map( + testhelper.create_project_config_map( om=ops_manager, project_name="blockstore", mdb_name=BLOCKSTORE_RS_NAME, @@ -297,114 +263,58 @@ def disable_istio( return None -@mark.e2e_multi_cluster_backup_restore_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_update_coredns( replica_set_external_hosts: List[Tuple[str, str]], cluster_clients: dict[str, kubernetes.client.ApiClient], ): - """ - This test updates the coredns config in the member clusters to allow connecting to the other replica set members - through an external address. 
- """ - for cluster_name, cluster_api in cluster_clients.items(): - update_coredns_hosts(replica_set_external_hosts, cluster_name, api_client=cluster_api) + testhelper.test_update_coredns(replica_set_external_hosts, cluster_clients) -@mark.e2e_multi_cluster_backup_restore_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_backup_restore_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh class TestOpsManagerCreation: - """ - name: Ops Manager successful creation with backup and oplog stores enabled - description: | - Creates an Ops Manager instance with backup enabled. The OM is expected to get to 'Pending' state - eventually as it will wait for oplog db to be created - """ - def test_create_om( self, ops_manager: MongoDBOpsManager, ): - ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() - ops_manager["spec"]["backup"]["members"] = 1 - - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Pending, - msg_regexp="The MongoDB object .+ doesn't exist", - timeout=1800, - ) + testhelper.TestOpsManagerCreation.test_create_om(ops_manager) def test_daemon_statefulset( self, ops_manager: MongoDBOpsManager, ): - def stateful_set_becomes_ready(): - stateful_set = ops_manager.read_backup_statefulset() - return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1 - - KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300) - - stateful_set = ops_manager.read_backup_statefulset() - # pod template has volume mount request - assert (HEAD_PATH, "head") in ( - (mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts - ) + testhelper.TestOpsManagerCreation.test_daemon_statefulset(ops_manager) def test_backup_daemon_services_created( self, namespace, central_cluster_client: kubernetes.client.ApiClient, ): - """Backup creates two additional services for queryable backup""" - services = client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items - - backup_services = [s for s in services if s.metadata.name.startswith("om-backup")] - - assert len(backup_services) >= 3 + testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(namespace, central_cluster_client) -@mark.e2e_multi_cluster_backup_restore_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh class TestBackupDatabasesAdded: - """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to - running state""" - def test_backup_mdbs_created( self, oplog_replica_set: MongoDB, blockstore_replica_set: MongoDB, ): - """Creates mongodb databases all at once""" - oplog_replica_set.assert_reaches_phase(Phase.Running) - blockstore_replica_set.assert_reaches_phase(Phase.Running) + testhelper.TestOpsManagerCreation.test_backup_mdbs_created(oplog_replica_set, blockstore_replica_set) def test_oplog_user_created(self, oplog_user: MongoDBUser): - oplog_user.assert_reaches_phase(Phase.Updated) + testhelper.TestOpsManagerCreation.test_oplog_user_created(oplog_user) def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): - """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" - ops_manager.backup_status().assert_reaches_phase( - 
Phase.Failed, - msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user " - "must be specified using 'mongodbUserRef'", - ) + testhelper.TestOpsManagerCreation.test_om_failed_oplog_no_user_ref(ops_manager) def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): - ops_manager.load() - ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Running, - timeout=200, - ignore_errors=True, - ) - - assert ops_manager.backup_status().get_message() is None + testhelper.TestOpsManagerCreation.test_fix_om(ops_manager, oplog_user) class TestBackupForMongodb: @@ -469,7 +379,7 @@ def mongodb_multi_one( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), + yaml_fixture("mongodbmulticluster-multi.yaml"), "multi-replica-set-one", namespace, # the project configmap should be created in the central cluster. @@ -574,7 +484,7 @@ def mongodb_multi_one( return resource.update() - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_setup_om_connection( self, replica_set_external_hosts: List[Tuple[str, str]], @@ -582,96 +492,35 @@ def test_setup_om_connection( central_cluster_client: kubernetes.client.ApiClient, member_cluster_clients: List[MultiClusterClient], ): - """ - test_setup_om_connection makes OM accessible from member clusters via a special interconnected dns address. - """ - ops_manager.load() - external_svc_name = ops_manager.external_svc_name() - svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client) - # we have no hostName, but the ip is resolvable. - ip = svc.status.load_balancer.ingress[0].ip - - interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected" - - # let's make sure that every client can connect to OM. - hosts = replica_set_external_hosts[:] - hosts.append((ip, interconnected_field)) - - for c in member_cluster_clients: - update_coredns_hosts( - host_mappings=hosts, - api_client=c.api_client, - cluster_name=c.cluster_name, - ) - - # let's make sure that the operator can connect to OM via that given address. - update_coredns_hosts( - host_mappings=[(ip, interconnected_field)], - api_client=central_cluster_client, - cluster_name="central-cluster", + testhelper.TestBackupForMongodb.test_setup_om_connection( + replica_set_external_hosts, ops_manager, central_cluster_client, member_cluster_clients ) - new_address = f"https://{interconnected_field}:8443" - # updating the central url app setting to point at the external address, - # this allows agents in other clusters to communicate correctly with this OM instance. 
- ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address - ops_manager.update() - - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): - # we might fail connection in the beginning since we set a custom dns in coredns - mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1500) + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(mongodb_multi_one) @skip_if_local - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_add_test_data(self, mongodb_multi_one_collection): - max_attempts = 100 - while max_attempts > 0: - try: - mongodb_multi_one_collection.insert_one(TEST_DATA) - return - except Exception as e: - print(e) - max_attempts -= 1 - time.sleep(6) - - @mark.e2e_multi_cluster_backup_restore_no_mesh + testhelper.TestBackupForMongodb.test_add_test_data(mongodb_multi_one_collection) + + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mdb_backed_up(self, project_one: OMTester): - project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + testhelper.TestBackupForMongodb.test_mdb_backed_up(project_one) - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_change_mdb_data(self, mongodb_multi_one_collection): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - time.sleep(30) - mongodb_multi_one_collection.insert_one({"foo": "bar"}) + testhelper.TestBackupForMongodb.test_change_mdb_data(mongodb_multi_one_collection) - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_pit_restore(self, project_one: OMTester): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - - pit_datetme = datetime.datetime.now() - datetime.timedelta(seconds=15) - pit_millis = time_to_millis(pit_datetme) - print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis)) + testhelper.TestBackupForMongodb.test_pit_restore(project_one) - project_one.create_restore_job_pit(pit_millis) - - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mdb_ready(self, mongodb_multi_one: MongoDBMulti): - # Note: that we are not waiting for the restore jobs to get finished as PIT restore jobs get FINISHED status - # right away. - # But the agent might still do work on the cluster, so we need to wait for that to happen. 
- mongodb_multi_one.assert_reaches_phase(Phase.Pending) - mongodb_multi_one.assert_reaches_phase(Phase.Running) + testhelper.TestBackupForMongodb.test_mdb_ready(mongodb_multi_one) - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_data_got_restored(self, mongodb_multi_one_collection): - assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=900) - - -def time_to_millis(date_time) -> int: - """https://stackoverflow.com/a/11111177/614239""" - epoch = datetime.datetime.utcfromtimestamp(0) - pit_millis = (date_time - epoch).total_seconds() * 1000 - return pit_millis + testhelper.TestBackupForMongodb.test_data_got_restored(mongodb_multi_one_collection) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_cli_recover.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_cli_recover.py similarity index 56% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_cli_recover.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_cli_recover.py index 16067e7ca..b4dbc3e97 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_cli_recover.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_cli_recover.py @@ -7,14 +7,10 @@ from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase -from tests.conftest import ( - run_kube_config_creation_tool, - run_multi_cluster_recovery_tool, -) -from tests.constants import MULTI_CLUSTER_OPERATOR_NAME from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_cli_recover as testhelper + RESOURCE_NAME = "multi-replica-set" BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" @@ -27,7 +23,7 @@ def mongodb_multi_unmarshalled( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) resource.set_version(custom_mdb_version) # ensure certs are created for the members during scale up resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) @@ -64,74 +60,43 @@ def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) - return mongodb_multi_unmarshalled -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_deploy_operator( install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], member_cluster_names: List[str], namespace: str, ): - run_kube_config_creation_tool(member_cluster_names[:-1], namespace, namespace, member_cluster_names) - # deploy the operator without the final cluster - operator = install_multi_cluster_operator_set_members_fn(member_cluster_names[:-1]) - operator.assert_is_running() + testhelper.test_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, 
timeout=600) + testhelper.test_create_mongodb_multi(mongodb_multi) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_recover_operator_add_cluster( member_cluster_names: List[str], namespace: str, central_cluster_client: kubernetes.client.ApiClient, ): - return_code = run_multi_cluster_recovery_tool(member_cluster_names, namespace, namespace) - assert return_code == 0 - operator = Operator( - name=MULTI_CLUSTER_OPERATOR_NAME, - namespace=namespace, - api_client=central_cluster_client, - ) - operator._wait_for_operator_ready() - operator.assert_is_running() + testhelper.test_recover_operator_add_cluster(member_cluster_names, namespace, central_cluster_client) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_mongodb_multi_recovers_adding_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): - mongodb_multi.load() + testhelper.test_mongodb_multi_recovers_adding_cluster(mongodb_multi, member_cluster_names) - mongodb_multi["spec"]["clusterSpecList"].append({"clusterName": member_cluster_names[-1], "members": 2}) - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) - -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_recover_operator_remove_cluster( member_cluster_names: List[str], namespace: str, central_cluster_client: kubernetes.client.ApiClient, ): - return_code = run_multi_cluster_recovery_tool(member_cluster_names[1:], namespace, namespace) - assert return_code == 0 - operator = Operator( - name=MULTI_CLUSTER_OPERATOR_NAME, - namespace=namespace, - api_client=central_cluster_client, - ) - operator._wait_for_operator_ready() - operator.assert_is_running() + testhelper.test_recover_operator_remove_cluster(member_cluster_names, namespace, central_cluster_client) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_mongodb_multi_recovers_removing_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): - mongodb_multi.load() - - last_transition_time = mongodb_multi.get_status_last_transition_time() - - mongodb_multi["spec"]["clusterSpecList"].pop(0) - mongodb_multi.update() - mongodb_multi.assert_state_transition_happens(last_transition_time) - - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) + testhelper.test_mongodb_multi_recovers_removing_cluster(mongodb_multi, member_cluster_names) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_clusterwide.py similarity index 60% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_clusterwide.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_clusterwide.py index bda9be3c2..d173b70d4 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_clusterwide.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_clusterwide.py @@ -1,25 +1,23 @@ import os -import time from typing import Dict, List import kubernetes from kubernetes import client -from kubetester import create_or_update_configmap, create_or_update_secret, read_secret -from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as 
yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.conftest import ( _install_multi_cluster_operator, run_kube_config_creation_tool, ) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME +from tests.multicluster.conftest import cluster_spec_list -from ..constants import MULTI_CLUSTER_OPERATOR_NAME -from . import prepare_multi_cluster_namespaces -from .conftest import cluster_spec_list, create_namespace +from ..shared import multi_cluster_clusterwide as testhelper + +MDB_RESOURCE = "multi-replica-set" @fixture(scope="module") @@ -44,7 +42,7 @@ def mongodb_multi_a( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdba_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdba_ns) resource.set_version(custom_mdb_version) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) @@ -61,7 +59,7 @@ def mongodb_multi_b( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdbb_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdbb_ns) resource.set_version(custom_mdb_version) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -75,7 +73,7 @@ def unmanaged_mongodb_multi( unmanaged_mdb_ns: str, member_cluster_names: List[str], ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", unmanaged_mdb_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, unmanaged_mdb_ns) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -114,7 +112,7 @@ def install_operator( ) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_namespaces( namespace: str, mdba_ns: str, @@ -125,38 +123,19 @@ def test_create_namespaces( evergreen_task_id: str, multi_cluster_operator_installation_config: Dict[str, str], ): - image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"] - image_pull_secret_data = read_secret(namespace, image_pull_secret_name) - - create_namespace( - central_cluster_client, - member_cluster_clients, - evergreen_task_id, + testhelper.test_create_namespaces( + namespace, mdba_ns, - image_pull_secret_name, - image_pull_secret_data, - ) - - create_namespace( - central_cluster_client, - member_cluster_clients, - evergreen_task_id, mdbb_ns, - image_pull_secret_name, - image_pull_secret_data, - ) - - create_namespace( + unmanaged_mdb_ns, central_cluster_client, member_cluster_clients, evergreen_task_id, - unmanaged_mdb_ns, - image_pull_secret_name, - image_pull_secret_data, + multi_cluster_operator_installation_config, ) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_prepare_namespace( multi_cluster_operator_installation_config: Dict[str, str], 
member_cluster_clients: List[MultiClusterClient], @@ -164,32 +143,22 @@ def test_prepare_namespace( mdba_ns: str, mdbb_ns: str, ): - prepare_multi_cluster_namespaces( - mdba_ns, - multi_cluster_operator_installation_config, - member_cluster_clients, - central_cluster_name, - ) - - prepare_multi_cluster_namespaces( - mdbb_ns, - multi_cluster_operator_installation_config, - member_cluster_clients, - central_cluster_name, + testhelper.test_prepare_namespace( + multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns ) -@mark.e2e_multi_cluster_clusterwide +@mark.e2e_mongodbmulticluster_multi_cluster_clusterwide def test_deploy_operator(multi_cluster_operator_clustermode: Operator): - multi_cluster_operator_clustermode.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator_clustermode) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_deploy_operator(install_operator: Operator): - install_operator.assert_is_running() + testhelper.test_deploy_operator(install_operator) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_copy_configmap_and_secret_across_ns( namespace: str, central_cluster_client: client.ApiClient, @@ -197,35 +166,21 @@ def test_copy_configmap_and_secret_across_ns( mdba_ns: str, mdbb_ns: str, ): - data = KubernetesTester.read_configmap(namespace, "my-project", api_client=central_cluster_client) - data["projectName"] = mdba_ns - create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client) - - data["projectName"] = mdbb_ns - create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client) - - data = read_secret(namespace, "my-credentials", api_client=central_cluster_client) - create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client) - create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client) + testhelper.test_copy_configmap_and_secret_across_ns( + namespace, central_cluster_client, multi_cluster_operator_installation_config, mdba_ns, mdbb_ns + ) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDBMulti): - mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_mongodb_multi_nsa(mongodb_multi_a) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti): - mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_mongodb_multi_nsb(mongodb_multi_b) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_mongodb_multi_unmanaged(unmanaged_mongodb_multi: MongoDBMulti): - """ - For an unmanaged resource, the status should not be updated! 
- """ - for i in range(10): - time.sleep(5) - - unmanaged_mongodb_multi.reload() - assert "status" not in unmanaged_mongodb_multi + testhelper.test_create_mongodb_multi_unmanaged(unmanaged_mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_dr_connect.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_dr_connect.py new file mode 100644 index 000000000..863499e1e --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_dr_connect.py @@ -0,0 +1,69 @@ +from typing import Dict + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator + +from ..shared import multi_cluster_dr_connect as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +# this test is intended to run locally, using telepresence. Make sure to configure the cluster_context to api-server mapping +# in the "cluster_host_mapping" fixture before running it. It is intented to be run locally with the command: make e2e-telepresence test=e2e_mongodbmulticluster_multi_cluster_dr local=true +@pytest.fixture(scope="module") +def mongodb_multi(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi-dr.yaml"), MDB_RESOURCE, namespace) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + # return resource.load() + return resource.create() + + +@pytest.fixture(scope="module") +def mongodb_multi_collection(mongodb_multi: MongoDBMulti): + collection = mongodb_multi.tester().client["testdb"] + return collection["testcollection"] + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_create_kube_config_file(cluster_clients: Dict): + testhelper.test_create_kube_config_file(cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +@pytest.mark.flaky(reruns=100, reruns_delay=6) +def test_add_test_data(mongodb_multi_collection): + testhelper.test_add_test_data(mongodb_multi_collection) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_delete_member_3_cluster(): + testhelper.test_delete_member_3_cluster() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_replica_set_is_reachable_after_deletetion(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable_after_deletetion(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_add_test_data_after_deletion(mongodb_multi_collection, capsys): + testhelper.test_add_test_data_after_deletion(mongodb_multi_collection, capsys) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_enable_tls.py 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_enable_tls.py similarity index 64% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_enable_tls.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_enable_tls.py index 86ae862d7..ef31eaff6 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_enable_tls.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_enable_tls.py @@ -1,29 +1,25 @@ from typing import List import kubernetes -from kubetester import read_secret from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_enable_tls as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-cluster-replica-set" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem" -USER_NAME = "my-user-1" -PASSWORD_SECRET_NAME = "mms-user-1-password" -USER_PASSWORD = "my-password" @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str, member_cluster_names, custom_mdb_version: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) resource.set_version(ensure_ent_version(custom_mdb_version)) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) return resource @@ -57,17 +53,17 @@ def mongodb_multi( return resource.create() -@mark.e2e_multi_cluster_enable_tls +@mark.e2e_mongodbmulticluster_multi_cluster_enable_tls def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_enable_tls +@mark.e2e_mongodbmulticluster_multi_cluster_enable_tls def test_create_mongodb_multi(mongodb_multi: MongoDBMulti, namespace: str): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_create_mongodb_multi(mongodb_multi, namespace) -@mark.e2e_multi_cluster_enable_tls +@mark.e2e_mongodbmulticluster_multi_cluster_enable_tls def test_enabled_tls_mongodb_multi( mongodb_multi: MongoDBMulti, namespace: str, @@ -75,20 +71,6 @@ def test_enabled_tls_mongodb_multi( multi_cluster_issuer_ca_configmap: str, member_cluster_clients: List[MultiClusterClient], ): - mongodb_multi.load() - mongodb_multi["spec"]["security"] = { - "certsSecretPrefix": CERT_SECRET_PREFIX, - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1300) - - # assert the presence of the generated pem certificates in each member cluster - for client in member_cluster_clients: - read_secret( - namespace=namespace, - name=BUNDLE_PEM_SECRET_NAME, - api_client=client.api_client, - ) + testhelper.test_enabled_tls_mongodb_multi( + mongodb_multi, 
namespace, server_certs, multi_cluster_issuer_ca_configmap, member_cluster_clients + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py similarity index 57% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py index af460bbbe..d4d6aa205 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py @@ -1,29 +1,25 @@ -import time from typing import Dict, List import kubernetes -from kubetester import create_secret, wait_until -from kubetester.automation_config_tester import AutomationConfigTester +from kubetester import create_secret from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_static_containers -from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM, LDAPUser, OpenLDAP +from kubetester.ldap import LDAPUser, OpenLDAP from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser, Role, generic_user from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.conftest import get_multi_cluster_operator_installation_config from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_ldap as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-replica-set-ldap" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -USER_NAME = "mms-user-1" -PASSWORD = "my-password" -LDAP_NAME = "openldap" @fixture(scope="module") @@ -39,7 +35,7 @@ def mongodb_multi_unmarshalled( member_cluster_names, custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) # This test has always been tested with 5.0.5-ent. After trying to unify its variant and upgrading it # to MDB 6 we realized that our EVG hosts contain outdated docker and seccomp libraries in the host which # cause MDB process to exit. It might be a good idea to try uncommenting it after migrating to newer EVG hosts. @@ -168,179 +164,86 @@ def user_ldap( @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_mongodb_multi_pending(mongodb_multi: MongoDBMulti): - """ - This function tests CLOUDP-229222. The resource needs to enter the "Pending" state and without the automatic - recovery, it would stay like this forever (since we wouldn't push the new AC with a fix). 
- """ - mongodb_multi.assert_reaches_phase(Phase.Pending, timeout=100) + testhelper.test_mongodb_multi_pending(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_turn_tls_on_CLOUDP_229222(mongodb_multi: MongoDBMulti): - """ - This function tests CLOUDP-229222. The user attempts to fix the AutomationConfig. - Before updating the AutomationConfig, we need to ensure the operator pushed the wrong one to Ops Manager. - """ - - def wait_for_ac_exists() -> bool: - ac = mongodb_multi.get_automation_config_tester().automation_config - try: - _ = ac["ldap"]["transportSecurity"] - _ = ac["version"] - return True - except KeyError: - return False - - wait_until(wait_for_ac_exists, timeout=200) - current_version = mongodb_multi.get_automation_config_tester().automation_config["version"] - - def wait_for_ac_pushed() -> bool: - ac = mongodb_multi.get_automation_config_tester().automation_config - try: - transport_security = ac["ldap"]["transportSecurity"] - new_version = ac["version"] - if transport_security != "none": - return False - if new_version <= current_version: - return False - return True - except KeyError: - return False - - wait_until(wait_for_ac_pushed, timeout=500) - - resource = mongodb_multi.load() - - resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" - resource.update() + testhelper.test_turn_tls_on_CLOUDP_229222(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_multi_replicaset_CLOUDP_229222(mongodb_multi: MongoDBMulti): - """ - This function tests CLOUDP-229222. The recovery mechanism kicks in and pushes Automation Config. The ReplicaSet - goes into running state. - """ - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1900) + testhelper.test_multi_replicaset_CLOUDP_229222(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_restore_mongodb_multi_ldap_configuration(mongodb_multi: MongoDBMulti): - """ - This function restores the initial desired security configuration to carry on with the next tests normally. 
- """ - resource = mongodb_multi.load() - - resource["spec"]["security"]["authentication"]["modes"] = ["LDAP"] - resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" - resource["spec"]["security"]["authentication"]["agents"]["mode"] = "LDAP" - - resource.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_restore_mongodb_multi_ldap_configuration(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_create_ldap_user(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser): - user_ldap.assert_reaches_phase(Phase.Updated) - ac = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=True) - ac.assert_expected_users(1) + testhelper.test_create_ldap_user(mongodb_multi, user_ldap) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_ldap_user_created_and_can_authenticate(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - attempts=10, - ) + testhelper.test_ldap_user_created_and_can_authenticate(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_ops_manager_state_correctly_updated(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser): - expected_roles = { - ("admin", "clusterAdmin"), - ("admin", "readWriteAnyDatabase"), - ("admin", "dbAdminAnyDatabase"), - } - ac = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac.assert_expected_users(1) - ac.assert_has_user(user_ldap["spec"]["username"]) - ac.assert_user_has_roles(user_ldap["spec"]["username"], expected_roles) - ac.assert_authentication_mechanism_enabled("PLAIN", active_auth_mechanism=True) - ac.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=1) - - assert "userCacheInvalidationInterval" in ac.automation_config["ldap"] - assert "timeoutMS" in ac.automation_config["ldap"] - assert ac.automation_config["ldap"]["userCacheInvalidationInterval"] == 60 - assert ac.automation_config["ldap"]["timeoutMS"] == 12345 + testhelper.test_ops_manager_state_correctly_updated(mongodb_multi, user_ldap) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_deployment_is_reachable_with_ldap_agent(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_deployment_reachable() + testhelper.test_deployment_is_reachable_with_ldap_agent(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti, member_cluster_names): - mongodb_multi.reload() - mongodb_multi["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_scale_mongodb_multi(mongodb_multi, member_cluster_names) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_new_ldap_user_can_authenticate_after_scaling( mongodb_multi: 
MongoDBMulti, user_ldap: MongoDBUser, ca_path: str ): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - attempts=10, - ) + testhelper.test_new_ldap_user_can_authenticate_after_scaling(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_disable_agent_auth(mongodb_multi: MongoDBMulti): - mongodb_multi.reload() - mongodb_multi["spec"]["security"]["authentication"]["enabled"] = False - mongodb_multi["spec"]["security"]["authentication"]["agents"]["enabled"] = False - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_disable_agent_auth(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_mongodb_multi_connectivity_with_no_auth(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_connectivity() + testhelper.test_mongodb_multi_connectivity_with_no_auth(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_deployment_is_reachable_with_no_auth(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_deployment_reachable() + testhelper.test_deployment_is_reachable_with_no_auth(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap_custom_roles.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap_custom_roles.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap_custom_roles.py index 6f472e6f2..89f1937f8 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap_custom_roles.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap_custom_roles.py @@ -2,31 +2,27 @@ import kubernetes from kubetester import create_secret -from kubetester.automation_config_tester import AutomationConfigTester from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester, ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_static_containers -from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM, LDAPUser, OpenLDAP +from kubetester.ldap import LDAPUser, OpenLDAP from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser, generic_user from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_ldap_custom_roles as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-replica-set-ldap" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -USER_NAME = "mms-user-1" -PASSWORD = "my-password" -LDAP_NAME = "openldap" @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str, member_cluster_names,
custom_mdb_version: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) # This test has always been tested with 5.0.5-ent. After trying to unify its variant and upgrading it # to MDB 6 we realized that our EVG hosts contain outdated docker and seccomp libraries in the host which @@ -151,84 +147,44 @@ def user_ldap( @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_create_mongodb_multi_with_ldap(mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_create_mongodb_multi_with_ldap(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_create_ldap_user(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser): - user_ldap.assert_reaches_phase(Phase.Updated) - ac = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=False) - ac.assert_expected_users(1) + testhelper.test_create_ldap_user(mongodb_multi, user_ldap) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_ldap_user_can_write_to_database(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - db="foo", - collection="foo", - attempts=10, - ) + testhelper.test_ldap_user_can_write_to_database(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles @mark.xfail(reason="The user should not be able to write to a database/collection it is not authorized to write on") def test_ldap_user_can_write_to_other_collection(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - db="foo", - collection="foo2", - attempts=10, - ) + testhelper.test_ldap_user_can_write_to_other_collection(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles @mark.xfail(reason="The user should not be able to write to a database/collection it is not authorized to write on") def test_ldap_user_can_write_to_other_database(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - db="foo2", - collection="foo", - attempts=10, - ) + 
testhelper.test_ldap_user_can_write_to_other_database(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_automation_config_has_roles(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.get_automation_config_tester() - role = { - "role": "cn=users,ou=groups,dc=example,dc=org", - "db": "admin", - "privileges": [ - {"actions": ["insert"], "resource": {"collection": "foo", "db": "foo"}}, - { - "actions": ["insert", "find"], - "resource": {"collection": "", "db": "admin"}, - }, - ], - "authenticationRestrictions": [], - } - tester.assert_expected_role(role_index=0, expected_value=role) + testhelper.test_automation_config_has_roles(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_group.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py similarity index 57% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_group.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py index ef8a2c582..41978b1d3 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_group.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py @@ -2,15 +2,14 @@ import kubetester.oidc as oidc import pytest from kubetester import try_load -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture -from kubetester.mongodb import MongoDB, Phase -from kubetester.mongodb_multi import MongoDBMulti, MultiClusterClient -from kubetester.mongotester import ReplicaSetTester +from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from pytest import fixture +from ..shared import multi_cluster_oidc_m2m_group as testhelper + MDB_RESOURCE = "oidc-multi-replica-set" @@ -21,7 +20,9 @@ def mongodb_multi( member_cluster_names, custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("oidc/mongodb-multi-m2m-group.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml( + yaml_fixture("oidc/mongodbmulticluster-multi-m2m-group.yaml"), MDB_RESOURCE, namespace + ) if try_load(resource): return resource @@ -38,21 +39,16 @@ def mongodb_multi( return resource.update() -@pytest.mark.e2e_multi_cluster_oidc_m2m_group +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_oidc_m2m_group class TestOIDCMultiCluster(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_oidc_replica_set(mongodb_multi) def test_assert_connectivity(self, mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_oidc_authentication() + testhelper.test_assert_connectivity(mongodb_multi) def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti): - tester = mongodb_multi.get_automation_config_tester() - 
tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) - tester.assert_authentication_enabled(2) - tester.assert_expected_users(0) - tester.assert_authoritative_set(True) + testhelper.test_ops_manager_state_updated_correctly(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_user.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py similarity index 65% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_user.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py index 3faa266f4..ad41749d0 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_user.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py @@ -2,16 +2,15 @@ import kubetester.oidc as oidc import pytest from kubetester import try_load -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture -from kubetester.mongodb import MongoDB, Phase from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser -from kubetester.mongotester import ReplicaSetTester from kubetester.operator import Operator from pytest import fixture +from ..shared import multi_cluster_oidc_m2m_user as testhelper + MDB_RESOURCE = "oidc-multi-replica-set" @@ -22,7 +21,9 @@ def mongodb_multi( member_cluster_names, custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("oidc/mongodb-multi-m2m-user.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml( + yaml_fixture("oidc/mongodbmulticluster-multi-m2m-user.yaml"), MDB_RESOURCE, namespace + ) if try_load(resource): return resource @@ -49,24 +50,19 @@ def oidc_user(namespace) -> MongoDBUser: return resource.update() -@pytest.mark.e2e_multi_cluster_oidc_m2m_user +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_oidc_m2m_user class TestOIDCMultiCluster(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_oidc_replica_set(mongodb_multi) def test_create_user(self, oidc_user: MongoDBUser): - oidc_user.assert_reaches_phase(Phase.Updated, timeout=800) + testhelper.test_create_user(oidc_user) def test_assert_connectivity(self, mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_oidc_authentication() + testhelper.test_assert_connectivity(mongodb_multi) def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti): - tester = mongodb_multi.get_automation_config_tester() - tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) - tester.assert_authentication_enabled(2) - tester.assert_expected_users(1) - tester.assert_authoritative_set(True) + testhelper.test_ops_manager_state_updated_correctly(mongodb_multi) diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_pvc_resize.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_pvc_resize.py new file mode 100644 index 000000000..b88229e62 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_pvc_resize.py @@ -0,0 +1,55 @@ +from typing import List + +import kubernetes +import pytest +from kubetester import try_load +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_pvc_resize as testhelper + +RESOURCE_NAME = "multi-replica-set-pvc-resize" + + +@pytest.fixture(scope="module") +def mongodb_multi( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi-pvc-resize.yaml"), RESOURCE_NAME, namespace + ) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + try_load(resource) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_resize_pvc_state_changes(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_mongodb_multi_resize_finished( + mongodb_multi: MongoDBMulti, namespace: str, member_cluster_clients: List[MultiClusterClient] +): + testhelper.test_mongodb_multi_resize_finished(mongodb_multi, namespace, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py new file mode 100644 index 000000000..19767cc06 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py @@ -0,0 +1,109 @@ +# It's intended to check for reconcile data races. 
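+# The wrappers below create two Ops Manager resources and several MongoDB deployments at once, +# then scan the operator pod logs for data races, both before and after a restart of the operator pod.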
+from typing import Optional + +import kubernetes.client +import pytest +from kubetester import find_fixture, try_load +from kubetester.operator import Operator +from kubetester.opsmanager import MongoDBOpsManager + +from ..shared import multi_cluster_reconcile_races as testhelper + + +@pytest.fixture(scope="module") +def ops_manager( + namespace: str, + custom_version: Optional[str], + custom_appdb_version: str, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBOpsManager: + resource = MongoDBOpsManager.from_yaml(find_fixture("om_validation.yaml"), namespace=namespace, name="om") + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.set_version(custom_version) + resource.set_appdb_version(custom_appdb_version) + + try_load(resource) + return resource + + +@pytest.fixture(scope="module") +def ops_manager2( + namespace: str, + custom_version: Optional[str], + custom_appdb_version: str, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBOpsManager: + resource = MongoDBOpsManager.from_yaml(find_fixture("om_validation.yaml"), namespace=namespace, name="om2") + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.set_version(custom_version) + resource.set_appdb_version(custom_appdb_version) + + try_load(resource) + return resource + + +@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry +def test_create_om(ops_manager: MongoDBOpsManager, ops_manager2: MongoDBOpsManager): + testhelper.test_create_om(ops_manager, ops_manager2) + + +@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry +def test_om_ready(ops_manager: MongoDBOpsManager): + testhelper.test_om_ready(ops_manager) + + +@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry +def test_om2_ready(ops_manager2: MongoDBOpsManager): + testhelper.test_om2_ready(ops_manager2) + + +@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry +def test_create_mdb(ops_manager: MongoDBOpsManager, namespace: str): + testhelper.test_create_mdb(ops_manager, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry +def test_create_mdbmc(ops_manager: MongoDBOpsManager, namespace: str): + testhelper.test_create_mdbmc(ops_manager, "mongodbmulticluster", namespace) + + +@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry +def test_create_sharded(ops_manager: MongoDBOpsManager, namespace: str): + testhelper.test_create_sharded(ops_manager, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry +def test_create_standalone(ops_manager: MongoDBOpsManager, namespace: str): + testhelper.test_create_standalone(ops_manager, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry +def test_create_users(ops_manager: MongoDBOpsManager, namespace: str): + testhelper.test_create_users(ops_manager, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry +def test_pod_logs_race(multi_cluster_operator: Operator): + testhelper.test_pod_logs_race(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry +def test_restart_operator_pod(ops_manager: MongoDBOpsManager, namespace: str, multi_cluster_operator: Operator): + 
testhelper.test_restart_operator_pod(ops_manager, namespace, multi_cluster_operator)
+
+
+@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry
+def test_pod_logs_race_after_restart(multi_cluster_operator: Operator):
+    testhelper.test_pod_logs_race_after_restart(multi_cluster_operator)
+
+
+@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry
+def test_telemetry_configmap(namespace: str):
+    testhelper.test_telemetry_configmap(namespace)
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py
new file mode 100644
index 000000000..76e3006da
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py
@@ -0,0 +1,235 @@
+import os
+from typing import Dict, List
+
+import kubernetes
+from kubeobject import CustomObject
+from kubernetes import client
+from kubetester.kubetester import fixture as yaml_fixture
+from kubetester.mongodb_multi import MongoDBMulti
+from kubetester.multicluster_client import MultiClusterClient
+from kubetester.operator import Operator
+from pytest import fixture, mark
+from tests.conftest import (
+    _install_multi_cluster_operator,
+    run_kube_config_creation_tool,
+)
+from tests.multicluster.conftest import (
+    cluster_spec_list,
+)
+from tests.constants import MULTI_CLUSTER_OPERATOR_NAME, OPERATOR_NAME
+from ..shared import multi_cluster_recover_clusterwide as testhelper
+
+MDB_RESOURCE = "multi-replica-set"
+
+
+@fixture(scope="module")
+def mdba_ns(namespace: str):
+    return "{}-mdb-ns-a".format(namespace)
+
+
+@fixture(scope="module")
+def mdbb_ns(namespace: str):
+    return "{}-mdb-ns-b".format(namespace)
+
+
+@fixture(scope="module")
+def mongodb_multi_a(
+    central_cluster_client: kubernetes.client.ApiClient,
+    mdba_ns: str,
+    member_cluster_names: List[str],
+    custom_mdb_version: str,
+) -> MongoDBMulti:
+    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdba_ns)
+    resource.set_version(custom_mdb_version)
+
+    resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2])
+
+    resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client)
+    resource.update()
+    return resource
+
+
+@fixture(scope="module")
+def mongodb_multi_b(
+    central_cluster_client: kubernetes.client.ApiClient,
+    mdbb_ns: str,
+    member_cluster_names: List[str],
+    custom_mdb_version: str,
+) -> MongoDBMulti:
+    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdbb_ns)
+    resource.set_version(custom_mdb_version)
+
+    resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2])
+    resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client)
+    resource.update()
+    return resource
+
+
+@fixture(scope="module")
+def install_operator(
+    namespace: str,
+    central_cluster_name: str,
+    multi_cluster_operator_installation_config: Dict[str, str],
+    central_cluster_client: client.ApiClient,
+    member_cluster_clients: List[MultiClusterClient],
+    member_cluster_names: List[str],
+    mdba_ns: str,
+    mdbb_ns: str,
+) -> Operator:
+    os.environ["HELM_KUBECONTEXT"] = central_cluster_name
+    member_cluster_namespaces = mdba_ns + "," + mdbb_ns
+    run_kube_config_creation_tool(
+        member_cluster_names,
+        namespace,
namespace, + member_cluster_names, + True, + service_account_name=MULTI_CLUSTER_OPERATOR_NAME, + operator_name=OPERATOR_NAME, + ) + + return _install_multi_cluster_operator( + namespace, + multi_cluster_operator_installation_config, + central_cluster_client, + member_cluster_clients, + { + "operator.deployment_name": MULTI_CLUSTER_OPERATOR_NAME, + "operator.name": MULTI_CLUSTER_OPERATOR_NAME, + "operator.createOperatorServiceAccount": "false", + "operator.watchNamespace": member_cluster_namespaces, + "multiCluster.performFailOver": "false", + }, + central_cluster_name, + operator_name=MULTI_CLUSTER_OPERATOR_NAME, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_label_operator_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_label_operator_namespace(namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_create_namespaces( + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + multi_cluster_operator_installation_config: Dict[str, str], +): + testhelper.test_create_namespaces( + namespace, + mdba_ns, + mdbb_ns, + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + multi_cluster_operator_installation_config, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_delete_cluster_role_and_binding( + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_cluster_role_and_binding(central_cluster_client, member_cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_deploy_operator(install_operator: Operator): + testhelper.test_deploy_operator(install_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_prepare_namespace( + multi_cluster_operator_installation_config: Dict[str, str], + member_cluster_clients: List[MultiClusterClient], + central_cluster_name: str, + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_prepare_namespace( + multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_copy_configmap_and_secret_across_ns( + namespace: str, + central_cluster_client: client.ApiClient, + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_copy_configmap_and_secret_across_ns(namespace, central_cluster_client, mdba_ns, mdbb_ns) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_create_mongodb_multi_nsa_nsb(mongodb_multi_a: MongoDBMulti, mongodb_multi_b: MongoDBMulti): + testhelper.test_create_mongodb_multi_nsa_nsb(mongodb_multi_a, mongodb_multi_b) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide 
+def test_delete_database_statefulsets_in_failed_cluster( + mongodb_multi_a: MongoDBMulti, + mongodb_multi_b: MongoDBMulti, + mdba_ns: str, + mdbb_ns: str, + member_cluster_names: list[str], + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_database_statefulsets_in_failed_cluster( + mongodb_multi_a, mongodb_multi_b, mdba_ns, mdbb_ns, member_cluster_names, member_cluster_clients + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a: MongoDBMulti): + testhelper.test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b: MongoDBMulti): + testhelper.test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, +): + testhelper.test_recover_operator_remove_cluster( + member_cluster_names, namespace, mdba_ns, mdbb_ns, central_cluster_client + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMulti): + testhelper.test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b: MongoDBMulti): + testhelper.test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_network_partition.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_network_partition.py new file mode 100644 index 000000000..e4d2f322a --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_network_partition.py @@ -0,0 +1,92 @@ +from typing import List + +import kubernetes +from kubeobject import CustomObject +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_recover_network_partition as testhelper + +RESOURCE_NAME = "multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource.api = client.CustomObjectsApi(central_cluster_client) + + return resource + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_label_namespace(namespace: str, central_cluster_client: client.ApiClient): + testhelper.test_label_namespace(namespace, central_cluster_client) + + 
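+# The cluster outage below is simulated at the service-mesh layer rather than
+# by breaking real network links: Istio ServiceEntry objects (created by the
+# service_entries fixture) control which cluster hosts are reachable, and
+# test_update_service_entry_block_failed_cluster_traffic later rewrites them to
+# drop traffic to one member cluster. This assumes an Istio mesh spanning the
+# member clusters; the operator is then expected to recover the replica set
+# once the failed cluster is removed from the clusterSpecList.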
+@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_deploy_operator(multi_cluster_operator_manual_remediation: Operator): + testhelper.test_deploy_operator(multi_cluster_operator_manual_remediation) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_delete_database_statefulset_in_failed_cluster( + mongodb_multi: MongoDBMulti, + member_cluster_names: list[str], +): + testhelper.test_delete_database_statefulset_in_failed_cluster(mongodb_multi, member_cluster_names) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_mongodb_multi_enters_failed_state( + mongodb_multi: MongoDBMulti, + namespace: str, + central_cluster_client: client.ApiClient, +): + testhelper.test_mongodb_multi_enters_failed_state(mongodb_multi, namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: client.ApiClient, +): + testhelper.test_recover_operator_remove_cluster(member_cluster_names, namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_mongodb_multi_recovers_removing_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): + testhelper.test_mongodb_multi_recovers_removing_cluster(mongodb_multi, member_cluster_names) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py new file mode 100644 index 000000000..d9f07372e --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py @@ -0,0 +1,138 @@ +from typing import Dict, List + +import kubernetes +import pytest +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.conftest import ( + setup_log_rotate_for_agents, +) +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set as testhelper + +MONGODB_PORT = 30000 +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + 
yaml_fixture("mongodbmutlicluster-multi-central-sts-override.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + additional_mongod_config = { + "systemLog": {"logAppend": True, "verbosity": 4}, + "operationProfiling": {"mode": "slowOp"}, + "net": {"port": MONGODB_PORT}, + } + + resource["spec"]["additionalMongodConfig"] = additional_mongod_config + setup_log_rotate_for_agents(resource) + + # TODO: incorporate this into the base class. + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + resource.set_architecture_annotation() + + resource.update() + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): + testhelper.test_create_kube_config_file(cluster_clients, central_cluster_name, member_cluster_names) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_statefulset_is_created_across_multiple_clusters( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulset_is_created_across_multiple_clusters(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_pvc_not_created( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_pvc_not_created(mongodb_multi, member_cluster_clients, namespace) + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_statefulset_overrides(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_headless_service_creation( + mongodb_multi: MongoDBMulti, + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_headless_service_creation(mongodb_multi, namespace, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_mongodb_options(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_options(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_update_additional_options(mongodb_multi: MongoDBMulti, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_update_additional_options(mongodb_multi, central_cluster_client) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_options_were_updated(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_delete_member_cluster_sts( + namespace: 
str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_member_cluster_sts(namespace, mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_cleanup_on_mdbm_delete(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_cleanup_on_mdbm_delete(mongodb_multi, member_cluster_clients) + + +def assert_container_in_sts(container_name: str, sts: client.V1StatefulSet): + testhelper.assert_container_in_sts(container_name, sts) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_deletion.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_deletion.py new file mode 100644 index 000000000..760edc3ad --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_deletion.py @@ -0,0 +1,63 @@ +from typing import List + +import kubernetes +import pytest +from kubetester import try_load +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_deletion as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + + if try_load(resource): + return resource + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + return resource.update() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_automation_config_has_been_updated(mongodb_multi: MongoDBMulti): + testhelper.test_automation_config_has_been_updated(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_delete_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_delete_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_deployment_has_been_removed_from_automation_config(): + testhelper.test_deployment_has_been_removed_from_automation_config() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_kubernetes_resources_have_been_cleaned_up( + mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] +): + testhelper.test_kubernetes_resources_have_been_cleaned_up(mongodb_multi, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_ignore_unknown_users.py 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users.py similarity index 56% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_ignore_unknown_users.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users.py index 6178377ea..363eada59 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_ignore_unknown_users.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users.py @@ -1,13 +1,14 @@ import kubernetes -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_replica_set_ignore_unknown_users as testhelper + +MDB_RESOURCE = "multi-replica-set" + @fixture(scope="module") def mongodb_multi( @@ -18,8 +19,8 @@ def mongodb_multi( ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), - "multi-replica-set", + yaml_fixture("mongodbmulticluster-multi.yaml"), + MDB_RESOURCE, namespace, ) resource.set_version(custom_mdb_version) @@ -34,26 +35,21 @@ def mongodb_multi( return resource.update() -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_replica_set(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_replica_set(multi_cluster_operator, mongodb_multi) -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_authoritative_set_false(mongodb_multi: MongoDBMulti): - tester = AutomationConfigTester(KubernetesTester.get_automation_config()) - tester.assert_authoritative_set(False) + testhelper.test_authoritative_set_false(mongodb_multi) -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_set_ignore_unknown_users_false(mongodb_multi: MongoDBMulti): - mongodb_multi.load() - mongodb_multi["spec"]["security"]["authentication"]["ignoreUnknownUsers"] = False - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_set_ignore_unknown_users_false(mongodb_multi) -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_authoritative_set_true(mongodb_multi: MongoDBMulti): - tester = AutomationConfigTester(KubernetesTester.get_automation_config()) - tester.assert_authoritative_set(True) + testhelper.test_authoritative_set_true(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_member_options.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_member_options.py new file mode 100644 index 000000000..c7e29d666 
--- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_member_options.py @@ -0,0 +1,125 @@ +from typing import Dict + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_member_options as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + member_options = [ + [ + { + "votes": 1, + "priority": "0.3", + "tags": { + "cluster": "cluster-1", + "region": "weur", + }, + }, + { + "votes": 1, + "priority": "0.7", + "tags": { + "cluster": "cluster-1", + "region": "eeur", + }, + }, + ], + [ + { + "votes": 1, + "priority": "0.2", + "tags": { + "cluster": "cluster-2", + "region": "apac", + }, + }, + ], + [ + { + "votes": 1, + "priority": "1.3", + "tags": { + "cluster": "cluster-3", + "region": "nwus", + }, + }, + { + "votes": 1, + "priority": "2.7", + "tags": { + "cluster": "cluster-3", + "region": "seus", + }, + }, + ], + ] + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2], member_options) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + resource.update() + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): + testhelper.test_create_kube_config_file(cluster_clients, central_cluster_name, member_cluster_names) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_member_options_ac(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_update_member_options(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_set_member_votes_to_0(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMulti): + 
testhelper.test_mongodb_multi_set_recover_valid_member_options(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_migration.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_migration.py similarity index 53% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_migration.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_migration.py index 5f43629d0..fc52981d5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_migration.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_migration.py @@ -4,9 +4,7 @@ import pymongo import pytest from kubetester import try_load -from kubetester.kubetester import assert_statefulset_architecture from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import get_default_architecture from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import MongoDBBackgroundTester from kubetester.multicluster_client import MultiClusterClient @@ -14,6 +12,8 @@ from kubetester.phase import Phase from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_replica_set_migration as testhelper + MDBM_RESOURCE = "multi-replica-set-migration" @@ -25,7 +25,7 @@ def mongodb_multi( custom_mdb_version, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDBM_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDBM_RESOURCE, namespace) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource["spec"]["version"] = custom_mdb_version resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -46,46 +46,28 @@ def mdb_health_checker(mongodb_multi: MongoDBMulti) -> MongoDBBackgroundTester: ) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti): - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + testhelper.test_create_mongodb_multi_running(mongodb_multi) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): - mdb_health_checker.start() + testhelper.test_start_background_checker(mdb_health_checker) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_migrate_architecture(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): - """ - If the E2E is running 
with default architecture as non-static,
-    then the test will migrate to static and vice versa.
-    """
-    original_default_architecture = get_default_architecture()
-    target_architecture = "non-static" if original_default_architecture == "static" else "static"
-
-    mongodb_multi.trigger_architecture_migration()
-
-    mongodb_multi.load()
-    assert mongodb_multi["metadata"]["annotations"]["mongodb.com/v1.architecture"] == target_architecture
-
-    mongodb_multi.assert_abandons_phase(Phase.Running, timeout=1800)
-    mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800)
-
-    statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients)
-    for statefulset in statefulsets.values():
-        assert_statefulset_architecture(statefulset, target_architecture)
+    testhelper.test_migrate_architecture(mongodb_multi, member_cluster_clients)


-@pytest.mark.e2e_multi_cluster_replica_set_migration
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration
 def test_mdb_healthy_throughout_change_version(
     mdb_health_checker: MongoDBBackgroundTester,
 ):
-    mdb_health_checker.assert_healthiness()
+    testhelper.test_mdb_healthy_throughout_change_version(mdb_health_checker)
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_down.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_down.py
new file mode 100644
index 000000000..960ced828
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_down.py
@@ -0,0 +1,112 @@
+from typing import List
+
+import kubernetes
+import pytest
+from kubetester import try_load
+from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs
+from kubetester.kubetester import fixture as yaml_fixture
+from kubetester.kubetester import skip_if_local
+from kubetester.mongodb_multi import MongoDBMulti
+from kubetester.multicluster_client import MultiClusterClient
+from kubetester.operator import Operator
+from tests.multicluster.conftest import cluster_spec_list

+from ..shared import multi_cluster_replica_set_scale_down as testhelper
+
+RESOURCE_NAME = "multi-replica-set"
+BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert"
+
+
+@pytest.fixture(scope="module")
+def mongodb_multi_unmarshalled(
+    namespace: str,
+    multi_cluster_issuer_ca_configmap: str,
+    central_cluster_client: kubernetes.client.ApiClient,
+    member_cluster_names: list[str],
+    custom_mdb_version: str,
+) -> MongoDBMulti:
+    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace)
+    resource.set_version(custom_mdb_version)
+    # start with members spread 2-1-2 across the clusters; the test scales down from here
+    resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2])
+
+    resource["spec"]["security"] = {
+        "certsSecretPrefix": "prefix",
+        "tls": {
+            "ca": multi_cluster_issuer_ca_configmap,
+        },
+    }
+    resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client)
+    return resource
+
+
+@pytest.fixture(scope="module")
+def server_certs(
+    multi_cluster_issuer: str,
+    mongodb_multi_unmarshalled: MongoDBMulti,
+    member_cluster_clients: List[MultiClusterClient],
+    central_cluster_client: kubernetes.client.ApiClient,
+):
+
+    return create_multi_cluster_mongodb_tls_certs(
+        multi_cluster_issuer,
+        BUNDLE_SECRET_NAME,
+        member_cluster_clients,
+        central_cluster_client,
+        mongodb_multi_unmarshalled,
+    )
+
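+# The certs above are issued once for the full 2-1-2 topology, so no
+# re-issuance is needed while members are removed later in the test. try_load
+# below makes the fixture re-entrant: if the resource already exists in the
+# cluster it is reused instead of being recreated.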
+@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: + if try_load(mongodb_multi_unmarshalled): + return mongodb_multi_unmarshalled + + return mongodb_multi_unmarshalled.update() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_scale_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_statefulsets_have_been_scaled_down_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_scaled_down_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py new file mode 100644 index 000000000..bdf324aae --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py @@ -0,0 +1,115 @@ +from typing import List + +import kubernetes +import kubetester +import pytest +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_scale_up as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + 
multi_cluster_issuer_ca_configmap: str,
+    central_cluster_client: kubernetes.client.ApiClient,
+    member_cluster_names: List[str],
+    custom_mdb_version: str,
+) -> MongoDBMulti:
+    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace)
+    resource.set_version(custom_mdb_version)
+    resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2])
+
+    resource["spec"]["security"] = {
+        "certsSecretPrefix": "prefix",
+        "tls": {
+            "ca": multi_cluster_issuer_ca_configmap,
+        },
+    }
+    resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client)
+    return resource
+
+
+@pytest.fixture(scope="module")
+def server_certs(
+    multi_cluster_issuer: str,
+    mongodb_multi_unmarshalled: MongoDBMulti,
+    member_cluster_clients: List[MultiClusterClient],
+    central_cluster_client: kubernetes.client.ApiClient,
+):
+
+    return create_multi_cluster_mongodb_tls_certs(
+        multi_cluster_issuer,
+        BUNDLE_SECRET_NAME,
+        member_cluster_clients,
+        central_cluster_client,
+        mongodb_multi_unmarshalled,
+    )
+
+
+@pytest.fixture(scope="module")
+def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti:
+    # we have created certs for all 5 members, but want to start at only 3.
+    mongodb_multi_unmarshalled["spec"]["clusterSpecList"][0]["members"] = 1
+    mongodb_multi_unmarshalled["spec"]["clusterSpecList"][1]["members"] = 1
+    mongodb_multi_unmarshalled["spec"]["clusterSpecList"][2]["members"] = 1
+    return mongodb_multi_unmarshalled.create()
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up
+def test_deploy_operator(multi_cluster_operator: Operator):
+    testhelper.test_deploy_operator(multi_cluster_operator)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up
+def test_create_mongodb_multi(mongodb_multi: MongoDBMulti):
+    testhelper.test_create_mongodb_multi(mongodb_multi)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up
+def test_statefulsets_have_been_created_correctly(
+    mongodb_multi: MongoDBMulti,
+    member_cluster_clients: List[MultiClusterClient],
+):
+    testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up
+def test_ops_manager_has_been_updated_correctly_before_scaling():
+    testhelper.test_ops_manager_has_been_updated_correctly_before_scaling()
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up
+def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti):
+    testhelper.test_scale_mongodb_multi(mongodb_multi)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up
+def test_statefulsets_have_been_scaled_up_correctly(
+    mongodb_multi: MongoDBMulti,
+    member_cluster_clients: List[MultiClusterClient],
+):
+    testhelper.test_statefulsets_have_been_scaled_up_correctly(mongodb_multi, member_cluster_clients)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up
+def test_ops_manager_has_been_updated_correctly_after_scaling():
+    testhelper.test_ops_manager_has_been_updated_correctly_after_scaling()
+
+
+@skip_if_local
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up
+def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str):
+    testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path)
diff --git
a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_test_mtls.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_test_mtls.py new file mode 100644 index 000000000..2f593dc62 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_test_mtls.py @@ -0,0 +1,86 @@ +from typing import List + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_test_mtls as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + # TODO: incorporate this into the base class. + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_create_mongo_pod_in_separate_namespace( + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + namespace: str, +): + testhelper.test_create_mongo_pod_in_separate_namespace(member_cluster_clients, evergreen_task_id, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_connectivity_fails_from_second_namespace( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_connectivity_fails_from_second_namespace(mongodb_multi, member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_enable_istio_injection( + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_enable_istio_injection(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_delete_existing_mongo_pod(member_cluster_clients: List[MultiClusterClient], namespace: str): + testhelper.test_delete_existing_mongo_pod(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_create_pod_with_istio_sidecar(member_cluster_clients: List[MultiClusterClient], namespace: str): + testhelper.test_create_pod_with_istio_sidecar(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_connectivity_succeeds_from_second_namespace( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + 
testhelper.test_connectivity_succeeds_from_second_namespace(mongodb_multi, member_cluster_clients, namespace)
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py
new file mode 100644
index 000000000..a528ca716
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py
@@ -0,0 +1,106 @@
+from typing import List
+
+import kubernetes
+import pytest
+from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs
+from kubetester.kubetester import fixture as yaml_fixture
+from kubetester.kubetester import skip_if_local
+from kubetester.mongodb_multi import MongoDBMulti
+from kubetester.multicluster_client import MultiClusterClient
+from kubetester.operator import Operator
+from tests.multicluster.conftest import cluster_spec_list
+
+from ..shared import multi_cluster_scale_down_cluster as testhelper
+
+RESOURCE_NAME = "multi-replica-set"
+BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert"
+
+
+@pytest.fixture(scope="module")
+def mongodb_multi_unmarshalled(
+    namespace: str,
+    multi_cluster_issuer_ca_configmap: str,
+    central_cluster_client: kubernetes.client.ApiClient,
+    member_cluster_names: list[str],
+    custom_mdb_version: str,
+) -> MongoDBMulti:
+    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace)
+    resource.set_version(custom_mdb_version)
+    resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2])
+    resource["spec"]["security"] = {
+        "certsSecretPrefix": "prefix",
+        "tls": {
+            "ca": multi_cluster_issuer_ca_configmap,
+        },
+    }
+    resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client)
+    return resource
+
+
+@pytest.fixture(scope="module")
+def server_certs(
+    multi_cluster_issuer: str,
+    mongodb_multi_unmarshalled: MongoDBMulti,
+    member_cluster_clients: List[MultiClusterClient],
+    central_cluster_client: kubernetes.client.ApiClient,
+):
+
+    return create_multi_cluster_mongodb_tls_certs(
+        multi_cluster_issuer,
+        BUNDLE_SECRET_NAME,
+        member_cluster_clients,
+        central_cluster_client,
+        mongodb_multi_unmarshalled,
+    )
+
+
+@pytest.fixture(scope="module")
+def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti:
+    return mongodb_multi_unmarshalled.create()
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster
+def test_deploy_operator(multi_cluster_operator: Operator):
+    testhelper.test_deploy_operator(multi_cluster_operator)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster
+def test_create_mongodb_multi(mongodb_multi: MongoDBMulti):
+    testhelper.test_create_mongodb_multi(mongodb_multi)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster
+def test_statefulsets_have_been_created_correctly(
+    mongodb_multi: MongoDBMulti,
+    member_cluster_clients: List[MultiClusterClient],
+):
+    testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster
+def test_ops_manager_has_been_updated_correctly_before_scaling():
+    testhelper.test_ops_manager_has_been_updated_correctly_before_scaling()
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster
+def
test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_scale_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_statefulsets_have_been_scaled_down_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_scaled_down_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py similarity index 60% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py index 3acc73dff..c9d5b4e61 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py @@ -8,20 +8,18 @@ random_k8s_name, read_configmap, try_load, - wait_until, ) -from kubetester.automation_config_tester import AutomationConfigTester from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_local from kubetester.mongodb_multi import MongoDBMulti -from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_scale_up_cluster as testhelper + RESOURCE_NAME = "multi-replica-set" BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" @@ -54,7 +52,7 @@ def mongodb_multi_unmarshalled( member_cluster_names: list[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) resource.set_version(custom_mdb_version) # ensure certs are created for the members during scale up resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [3, 1, 2]) @@ -97,97 +95,66 @@ def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) - return mongodb_multi_unmarshalled -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_create_mongodb_multi(mongodb_multi: 
MongoDBMulti): - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + testhelper.test_create_mongodb_multi(mongodb_multi) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_statefulsets_have_been_created_correctly( mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient], ): - # read all statefulsets except the last one - mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients[:-1]) + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_ops_manager_has_been_updated_correctly_before_scaling(): - ac = AutomationConfigTester() - ac.assert_processes_size(3) + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): - mongodb_multi["spec"]["clusterSpecList"].append( - {"members": 2, "clusterName": member_cluster_clients[2].cluster_name} - ) - mongodb_multi.update() - mongodb_multi.assert_abandons_phase(Phase.Running, timeout=120) - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) + testhelper.test_scale_mongodb_multi(mongodb_multi, member_cluster_clients) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_statefulsets_have_been_scaled_up_correctly( mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient], ): - mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients, timeout=60) + testhelper.test_statefulsets_have_been_scaled_up_correctly(mongodb_multi, member_cluster_clients) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_ops_manager_has_been_updated_correctly_after_scaling(): - ac = AutomationConfigTester() - ac.assert_processes_size(5) + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() @skip_if_local -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) # From here on, the tests are for verifying that we can change the project of the MongoDBMulti resource even with # non-sequential member ids in the replicaset. -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster class TestNonSequentialMemberIdsInReplicaSet(KubernetesTester): def test_scale_up_first_cluster( self, mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] ): - # Scale up the first cluster to 3 members. This will lead to non-sequential member ids in the replicaset. 
- # multi-replica-set-0-0 : 0 - # multi-replica-set-0-1 : 1 - # multi-replica-set-0-2 : 5 - # multi-replica-set-1-0 : 2 - # multi-replica-set-2-0 : 3 - # multi-replica-set-2-1 : 4 - - mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 3 - mongodb_multi.update() - - mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients) - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + testhelper.TestNonSequentialMemberIdsInReplicaSet.test_scale_up_first_cluster( + mongodb_multi, member_cluster_clients + ) def test_change_project(self, mongodb_multi: MongoDBMulti, new_project_configmap: str): - oldRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name) - - mongodb_multi["spec"]["opsManager"]["configMapRef"]["name"] = new_project_configmap - mongodb_multi.update() - - mongodb_multi.assert_abandons_phase(phase=Phase.Running, timeout=300) - mongodb_multi.assert_reaches_phase(phase=Phase.Running, timeout=600) - - newRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name) - - # Assert that the replica set member ids have not changed after changing the project. - assert oldRsMembers == newRsMembers + testhelper.TestNonSequentialMemberIdsInReplicaSet.test_change_project(mongodb_multi, new_project_configmap) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py new file mode 100644 index 000000000..c1b456369 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py @@ -0,0 +1,130 @@ +from typing import Callable, List + +import kubernetes +import pytest +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scale_up_cluster_new_cluster as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + # ensure certs are created for the members during scale up + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + return 
create_multi_cluster_mongodb_tls_certs(
+        multi_cluster_issuer,
+        BUNDLE_SECRET_NAME,
+        member_cluster_clients,
+        central_cluster_client,
+        mongodb_multi_unmarshalled,
+    )
+
+
+@pytest.fixture(scope="module")
+def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti:
+    mongodb_multi_unmarshalled["spec"]["clusterSpecList"].pop()
+    return mongodb_multi_unmarshalled.create()
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
+def test_deploy_operator(
+    install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator],
+    member_cluster_names: List[str],
+    namespace: str,
+):
+    testhelper.test_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
+def test_create_mongodb_multi(mongodb_multi: MongoDBMulti):
+    testhelper.test_create_mongodb_multi(mongodb_multi)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
+def test_statefulsets_have_been_created_correctly(
+    mongodb_multi: MongoDBMulti,
+    member_cluster_clients: List[MultiClusterClient],
+):
+    testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
+def test_ops_manager_has_been_updated_correctly_before_scaling():
+    testhelper.test_ops_manager_has_been_updated_correctly_before_scaling()
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
+def test_delete_deployment(namespace: str, central_cluster_client: kubernetes.client.ApiClient):
+    testhelper.test_delete_deployment(namespace, central_cluster_client)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
+def test_re_deploy_operator(
+    install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator],
+    member_cluster_names: List[str],
+    namespace: str,
+):
+    testhelper.test_re_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
+def test_add_new_cluster_to_mongodb_multi_resource(
+    mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]
+):
+    testhelper.test_add_new_cluster_to_mongodb_multi_resource(mongodb_multi, member_cluster_clients)
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
+def test_statefulsets_have_been_created_correctly_after_cluster_addition(
+    mongodb_multi: MongoDBMulti,
+    member_cluster_clients: List[MultiClusterClient],
+):
+    testhelper.test_statefulsets_have_been_created_correctly_after_cluster_addition(
+        mongodb_multi, member_cluster_clients
+    )
+
+
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
+def test_ops_manager_has_been_updated_correctly_after_scaling():
+    testhelper.test_ops_manager_has_been_updated_correctly_after_scaling()
+
+
+@skip_if_local
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
+def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str):
+    testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path)
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scram.py
new file mode
100644 index 000000000..d9941c298 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scram.py @@ -0,0 +1,144 @@ +from typing import List + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scram as testhelper + +MDB_RESOURCE = "multi-replica-set-scram" +USER_NAME = "my-user-1" +USER_RESOURCE = "multi-replica-set-scram-user" +PASSWORD_SECRET_NAME = "mms-user-1-password" + + +@pytest.fixture(scope="function") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + + resource["spec"]["security"] = { + "authentication": { + "agents": {"mode": "MONGODB-CR"}, + "enabled": True, + "modes": ["SCRAM-SHA-1", "SCRAM-SHA-256", "MONGODB-CR"], + } + } + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@pytest.fixture(scope="function") +def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: + resource = MongoDBUser.from_yaml(yaml_fixture("scram-sha-user.yaml"), USER_RESOURCE, namespace) + + resource["spec"]["username"] = USER_NAME + resource["spec"]["passwordSecretKeyRef"] = { + "name": PASSWORD_SECRET_NAME, + "key": "password", + } + resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_create_mongodb_user( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_user: MongoDBUser, + namespace: str, +): + testhelper.test_create_mongodb_user(central_cluster_client, mongodb_user, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_create_mongodb_multi_with_scram(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi_with_scram(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_user_reaches_updated( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_user: MongoDBUser, +): + testhelper.test_user_reaches_updated(central_cluster_client, mongodb_user) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity_using_user_password(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_connectivity_using_user_password(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_change_password_and_check_connectivity( + namespace: str, + mongodb_multi: MongoDBMulti, + central_cluster_client: kubernetes.client.ApiClient, +): + testhelper.test_change_password_and_check_connectivity(namespace, mongodb_multi, central_cluster_client) + + 
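+# Reference only: the connection-string checks below read the generated user secret and connect with
+# both URI flavours. Illustrative shapes (host names and ordering here are assumptions, not assertions):
+#   standard: mongodb://my-user-1:<password>@multi-replica-set-scram-0-0.<ns>.svc.cluster.local:27017,...
+#   SRV:      mongodb+srv://my-user-1:<password>@multi-replica-set-scram-svc.<ns>.svc.cluster.local
+
+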
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti): + testhelper.test_user_cannot_authenticate_with_old_password(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_connection_string_secret_was_created( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_connection_string_secret_was_created(namespace, mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_om_configured_correctly(): + testhelper.test_om_configured_correctly() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_connectivity(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity_from_connection_string_standard( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_replica_set_connectivity_from_connection_string_standard( + namespace, mongodb_multi, member_cluster_clients + ) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity_from_connection_string_standard_srv( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_replica_set_connectivity_from_connection_string_standard_srv( + namespace, mongodb_multi, member_cluster_clients + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_split_horizon.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_split_horizon.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_split_horizon.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_split_horizon.py index c32b8c38a..7d8be4df1 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_split_horizon.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_split_horizon.py @@ -1,30 +1,25 @@ from typing import List import kubernetes -import yaml from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_local from kubetester.mongodb_multi import MongoDBMulti -from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark +from ..shared import multi_cluster_split_horizon as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-cluster-replica-set" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -USER_NAME = "my-user-1" -PASSWORD_SECRET_NAME = "mms-user-1-password" -USER_PASSWORD = "my-password" # This test will set up an environment which will configure a resource with split horizon enabled. # Steps to run this test. # 1. Change the nodenames under "additional_domains" -# 2. Run this test with: `make e2e test=e2e_multi_cluster_split_horizon light=true local=true`. +# 2. 
Run this test with: `make e2e test=e2e_mongodbmulticluster_multi_cluster_split_horizon light=true local=true`. # 3. Wait for the test to pass (this means the environment is set up.) # 4. Exec into any database pod and note the contents of the files referenced by the fields # * net.tls.certificateKeyFile @@ -55,7 +50,9 @@ @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-split-horizon.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi-split-horizon.yaml"), MDB_RESOURCE, namespace + ) return resource @@ -102,48 +99,25 @@ def mongodb_multi( return resource.create() -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_deploy_mongodb_multi_with_tls( mongodb_multi: MongoDBMulti, namespace: str, ): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_deploy_mongodb_multi_with_tls(mongodb_multi, namespace) -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_create_node_ports(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): - for mcc in member_cluster_clients: - with open( - yaml_fixture(f"split-horizon-node-ports/split-horizon-node-port.yaml"), - "r", - ) as f: - service_body = yaml.safe_load(f.read()) - - # configure labels and selectors - service_body["metadata"]["labels"][ - "mongodbmulticluster" - ] = f"{mongodb_multi.namespace}-{mongodb_multi.name}" - service_body["metadata"]["labels"][ - "statefulset.kubernetes.io/pod-name" - ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" - service_body["spec"]["selector"][ - "statefulset.kubernetes.io/pod-name" - ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" - - KubernetesTester.create_service( - mongodb_multi.namespace, - body=service_body, - api_client=mcc.api_client, - ) + testhelper.test_create_node_ports(mongodb_multi, member_cluster_clients) @skip_if_local -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_tls_connectivity(mongodb_multi: MongoDBMulti, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) + testhelper.test_tls_connectivity(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_sts_override.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_sts_override.py new file mode 100644 index 000000000..1d21aff50 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_sts_override.py @@ -0,0 +1,59 @@ +from typing import List + +import kubernetes +import pytest +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator + +from ..shared import multi_cluster_sts_override as testhelper + +MDB_RESOURCE = "multi-replica-set-sts-override" + + 
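+# The assertions for this module live in tests/multicluster/shared/multi_cluster_sts_override.py
+# (imported as testhelper); the fixture below applies the statefulSet overrides declared in
+# mongodbmulticluster-multi-sts-override.yaml, which the tests then verify in each member cluster.
+
+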
+@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi-sts-override.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.update() + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_statefulset_overrides(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_access_modes_pvc( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_access_modes_pvc(mongodb_multi, member_cluster_clients, namespace) + + +def assert_container_in_sts(container_name: str, sts: client.V1StatefulSet): + testhelper.assert_container_in_sts(container_name, sts) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_no_mesh.py new file mode 100644 index 000000000..0747f020d --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_no_mesh.py @@ -0,0 +1,198 @@ +from typing import List + +import kubernetes +from kubernetes import client +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_tls_no_mesh as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" + + +@fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, member_cluster_names: List[str], custom_mdb_version: str +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + # These domains map 1:1 to the CoreDNS file. Please be mindful when updating them. 
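+    # Illustrative only (assumed hostname convention): with the externalDomain values below, the first
+    # pod in the first cluster is expected to be addressed as
+    # multi-cluster-replica-set-0-0.kind-e2e-cluster-1.interconnected, i.e. <name>-<cluster>-<pod>.<externalDomain>.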
+ resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 2, 2]) + + resource["spec"]["externalAccess"] = {} + resource["spec"]["clusterSpecList"][0]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-1.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing0", + "port": 27019, + }, + ], + } + }, + } + resource["spec"]["clusterSpecList"][1]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-2.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing1", + "port": 27019, + }, + ], + } + }, + } + resource["spec"]["clusterSpecList"][2]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-3.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing2", + "port": 27019, + }, + ], + } + }, + } + + return resource + + +@fixture(scope="module") +def disable_istio( + multi_cluster_operator: Operator, + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + for mcc in member_cluster_clients: + api = client.CoreV1Api(api_client=mcc.api_client) + labels = {"istio-injection": "disabled"} + ns = api.read_namespace(name=namespace) + ns.metadata.labels.update(labels) + api.replace_namespace(name=namespace, body=ns) + return None + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + disable_istio, + namespace: str, + mongodb_multi_unmarshalled: MongoDBMulti, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDBMulti: + mongodb_multi_unmarshalled["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + mongodb_multi_unmarshalled.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return mongodb_multi_unmarshalled.update() + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_update_coredns(cluster_clients: dict[str, kubernetes.client.ApiClient]): + testhelper.test_update_coredns(cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_create_mongodb_multi( + mongodb_multi: MongoDBMulti, + namespace: str, + server_certs: str, + multi_cluster_issuer_ca_configmap: str, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], +): + testhelper.test_create_mongodb_multi( + mongodb_multi, + namespace, + server_certs, + multi_cluster_issuer_ca_configmap, + member_cluster_clients, + 
member_cluster_names, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_service_overrides( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_service_overrides(namespace, mongodb_multi, member_cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_placeholders_in_external_services( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_placeholders_in_external_services(namespace, mongodb_multi, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py new file mode 100644 index 000000000..bd590c055 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py @@ -0,0 +1,175 @@ +from typing import List + +import kubernetes +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_tls_with_scram as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" +USER_NAME = "my-user-1" +USER_RESOURCE = "multi-replica-set-scram-user" +PASSWORD_SECRET_NAME = "mms-user-1-password" + + +@fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(ensure_ent_version(custom_mdb_version)) + resource["spec"]["clusterSpecList"] = cluster_spec_list( + member_cluster_names=member_cluster_names, members=[2, 1, 2] + ) + + return resource + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + server_certs: str, + mongodb_multi_unmarshalled: MongoDBMulti, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDBMulti: + + resource = mongodb_multi_unmarshalled + resource["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.create() + + +@fixture(scope="module") +def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> 
MongoDBUser:
+    resource = MongoDBUser.from_yaml(yaml_fixture("mongodb-user.yaml"), USER_RESOURCE, namespace)
+
+    resource["spec"]["username"] = USER_NAME
+    resource["spec"]["passwordSecretKeyRef"] = {
+        "name": PASSWORD_SECRET_NAME,
+        "key": "password",
+    }
+    resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE
+    resource["spec"]["mongodbResourceRef"]["namespace"] = namespace
+    resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client)
+    return resource.create()
+
+
+@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram
+def test_deploy_operator(multi_cluster_operator: Operator):
+    testhelper.test_deploy_operator(multi_cluster_operator)
+
+
+@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram
+def test_deploy_mongodb_multi_with_tls(
+    mongodb_multi: MongoDBMulti,
+    namespace: str,
+    member_cluster_clients: List[MultiClusterClient],
+):
+    testhelper.test_deploy_mongodb_multi_with_tls(mongodb_multi, namespace, member_cluster_clients)
+
+
+@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram
+def test_update_mongodb_multi_tls_with_scram(
+    mongodb_multi: MongoDBMulti,
+    namespace: str,
+):
+    testhelper.test_update_mongodb_multi_tls_with_scram(mongodb_multi, namespace)
+
+
+@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram
+def test_create_mongodb_user(
+    central_cluster_client: kubernetes.client.ApiClient,
+    mongodb_user: MongoDBUser,
+    namespace: str,
+):
+    testhelper.test_create_mongodb_user(central_cluster_client, mongodb_user, namespace)
+
+
+@skip_if_local
+@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram
+def test_tls_connectivity(mongodb_multi: MongoDBMulti, ca_path: str):
+    testhelper.test_tls_connectivity(mongodb_multi, ca_path)
+
+
+@skip_if_local
+@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram
+def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti, ca_path: str):
+    testhelper.test_replica_set_connectivity_with_scram_and_tls(mongodb_multi, ca_path)
+
+
+@skip_if_local
+@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram
+def test_replica_set_connectivity_from_connection_string_standard(
+    namespace: str,
+    mongodb_multi: MongoDBMulti,
+    member_cluster_clients: List[MultiClusterClient],
+    ca_path: str,
+):
+    testhelper.test_replica_set_connectivity_from_connection_string_standard(
+        namespace, mongodb_multi, member_cluster_clients, ca_path
+    )
+
+
+@skip_if_local
+@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram
+def test_replica_set_connectivity_from_connection_string_standard_srv(
+    namespace: str,
+    mongodb_multi: MongoDBMulti,
+    member_cluster_clients: List[MultiClusterClient],
+    ca_path: str,
+):
+    testhelper.test_replica_set_connectivity_from_connection_string_standard_srv(
+        namespace, mongodb_multi, member_cluster_clients, ca_path
+    )
+
+
+@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram
+def test_mongodb_multi_tls_enable_x509(
+    mongodb_multi: MongoDBMulti,
+    namespace: str,
+):
+    testhelper.test_mongodb_multi_tls_enable_x509(mongodb_multi, namespace)
+
+
+@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram
+def test_mongodb_multi_tls_automation_config_was_updated(
+    mongodb_multi: MongoDBMulti,
+    namespace: str,
+):
+    testhelper.test_mongodb_multi_tls_automation_config_was_updated(mongodb_multi, namespace)
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_x509.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py
similarity
index 69% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_x509.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py index c0c421b3f..312c518dd 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_x509.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py @@ -1,24 +1,22 @@ -import tempfile from typing import List import kubernetes -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs import Certificate, create_multi_cluster_x509_user_cert from kubetester.certs_mongodb_multi import ( create_multi_cluster_mongodb_x509_tls_certs, create_multi_cluster_x509_agent_certs, ) -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_local from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_tls_with_x509 as testhelper + # TODO This test needs to re-introduce certificate rotation and enabling authentication step by step # See https://jira.mongodb.org/browse/CLOUDP-311366 @@ -31,7 +29,7 @@ @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str, member_cluster_names, custom_mdb_version: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) resource.set_version(ensure_ent_version(custom_mdb_version)) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) @@ -122,7 +120,9 @@ def mongodb_multi( @fixture(scope="module") def mongodb_x509_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: - resource = MongoDBUser.from_yaml(yaml_fixture("mongodb-x509-user.yaml"), "multi-replica-set-x509-user", namespace) + resource = MongoDBUser.from_yaml( + yaml_fixture("mongodb-x509-user.yaml"), "multi-replica-set-x509-user", namespace + ) resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -131,35 +131,32 @@ def mongodb_x509_user(central_cluster_client: kubernetes.client.ApiClient, names return resource -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_deploy_mongodb_multi_with_tls_and_authentication(mongodb_multi: MongoDBMulti, namespace: str): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_deploy_mongodb_multi_with_tls_and_authentication(mongodb_multi, namespace) -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def 
test_ops_manager_state_was_updated_correctly(mongodb_multi: MongoDBMulti): - ac_tester = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac_tester.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=2) - ac_tester.assert_authentication_mechanism_enabled("MONGODB-X509") - ac_tester.assert_internal_cluster_authentication_enabled() + testhelper.test_ops_manager_state_was_updated_correctly(mongodb_multi) -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_create_mongodb_x509_user( central_cluster_client: kubernetes.client.ApiClient, mongodb_x509_user: MongoDBUser, namespace: str, ): - mongodb_x509_user.assert_reaches_phase(Phase.Updated, timeout=100) + testhelper.test_create_mongodb_x509_user(central_cluster_client, mongodb_x509_user, namespace) @skip_if_local -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_x509_user_connectivity( mongodb_multi: MongoDBMulti, central_cluster_client: kubernetes.client.ApiClient, @@ -167,23 +164,12 @@ def test_x509_user_connectivity( namespace: str, ca_path: str, ): - with tempfile.NamedTemporaryFile(delete=False, mode="w") as cert_file: - create_multi_cluster_x509_user_cert( - multi_cluster_issuer, namespace, central_cluster_client, path=cert_file.name - ) - tester = mongodb_multi.tester() - tester.assert_x509_authentication(cert_file_name=cert_file.name, tlsCAFile=ca_path) + testhelper.test_x509_user_connectivity( + mongodb_multi, central_cluster_client, multi_cluster_issuer, namespace, ca_path + ) # TODO Replace and use this method to check that certificate rotation after enabling TLS and authentication mechanisms # keeps the resources reachable and in Running state. def assert_certificate_rotation(central_cluster_client, mongodb_multi, namespace, certificate_name): - cert = Certificate(name=certificate_name, namespace=namespace) - cert.api = kubernetes.client.CustomObjectsApi(api_client=central_cluster_client) - cert.load() - cert["spec"]["dnsNames"].append("foo") # Append DNS to cert to rotate the certificate - cert.update() - # FIXME the assertions below need to be replaced with a robust check that the agents are ready - # and the TLS certificates are rotated. 
- mongodb_multi.assert_abandons_phase(Phase.Running, timeout=100) - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) + testhelper.assert_certificate_rotation(central_cluster_client, mongodb_multi, namespace, certificate_name) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_upgrade_downgrade.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_upgrade_downgrade.py similarity index 52% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_upgrade_downgrade.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_upgrade_downgrade.py index 4aba05a9c..947d72785 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_upgrade_downgrade.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_upgrade_downgrade.py @@ -2,14 +2,15 @@ import pymongo import pytest from kubetester import try_load -from kubetester.kubetester import ensure_ent_version, fcv_from_version +from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import MongoDBBackgroundTester from kubetester.operator import Operator -from kubetester.phase import Phase from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_upgrade_downgrade as testhelper + MDBM_RESOURCE = "multi-replica-set-upgrade" @@ -21,7 +22,7 @@ def mongodb_multi( custom_mdb_prev_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDBM_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDBM_RESOURCE, namespace) resource.set_version(ensure_ent_version(custom_mdb_prev_version)) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -42,60 +43,43 @@ def mdb_health_checker(mongodb_multi: MongoDBMulti) -> MongoDBBackgroundTester: ) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti, custom_mdb_prev_version: str): - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) - mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + testhelper.test_create_mongodb_multi_running(mongodb_multi, custom_mdb_prev_version) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): - mdb_health_checker.start() + testhelper.test_start_background_checker(mdb_health_checker) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_mongodb_multi_upgrade(mongodb_multi: MongoDBMulti, custom_mdb_prev_version: str, custom_mdb_version: str): - mongodb_multi.load() - 
mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_version) - mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) - mongodb_multi.update() - - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + testhelper.test_mongodb_multi_upgrade(mongodb_multi, custom_mdb_prev_version, custom_mdb_version) - mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_version)) - -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_upgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_connectivity() + testhelper.test_upgraded_replica_set_is_reachable(mongodb_multi) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_mongodb_multi_downgrade(mongodb_multi: MongoDBMulti, custom_mdb_prev_version: str): - mongodb_multi.load() - mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_prev_version) - mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) - mongodb_multi.update() - - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) - mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + testhelper.test_mongodb_multi_downgrade(mongodb_multi, custom_mdb_prev_version) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_downgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_connectivity() + testhelper.test_downgraded_replica_set_is_reachable(mongodb_multi) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_mdb_healthy_throughout_change_version( mdb_health_checker: MongoDBBackgroundTester, ): - mdb_health_checker.assert_healthiness() + testhelper.test_mdb_healthy_throughout_change_version(mdb_health_checker) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py new file mode 100644 index 000000000..390bbd939 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py @@ -0,0 +1,25 @@ +import kubernetes +import pytest +import yaml +from kubetester.kubetester import KubernetesTester +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.operator import Operator + +from ..shared import multi_cluster_validation as testhelper + +MDBM_RESOURCE = "mongodbmulticluster-multi-cluster.yaml" + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_validation +class TestWebhookValidation(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator, MDBM_RESOURCE) + + def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_unique_cluster_names(central_cluster_client, MDBM_RESOURCE) + + def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_only_one_schema(central_cluster_client, MDBM_RESOURCE) + + def test_non_empty_clusterspec_list(self, central_cluster_client: 
kubernetes.client.ApiClient): + testhelper.test_non_empty_clusterspec_list(central_cluster_client, MDBM_RESOURCE) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py deleted file mode 100644 index 981df49d4..000000000 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py +++ /dev/null @@ -1,516 +0,0 @@ -import datetime -import time -from typing import Dict, List, Optional - -import kubernetes -import kubernetes.client -import pymongo -import pytest -from kubernetes import client -from kubetester import ( - create_or_update_configmap, - create_or_update_secret, - get_default_storage_class, - read_service, - try_load, -) -from kubetester.certs import create_ops_manager_tls_certs -from kubetester.kubetester import KubernetesTester, ensure_ent_version -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local -from kubetester.mongodb import MongoDB -from kubetester.mongodb_multi import MongoDBMulti -from kubetester.mongodb_user import MongoDBUser -from kubetester.multicluster_client import MultiClusterClient -from kubetester.omtester import OMTester -from kubetester.operator import Operator -from kubetester.opsmanager import MongoDBOpsManager -from kubetester.phase import Phase -from pytest import fixture, mark -from tests.conftest import ( - assert_data_got_restored, - update_coredns_hosts, - wait_for_primary, -) - -TEST_DATA = {"_id": "unique_id", "name": "John", "address": "Highway 37", "age": 30} - -MONGODB_PORT = 30000 - - -HEAD_PATH = "/head/" -OPLOG_RS_NAME = "my-mongodb-oplog" -BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" -USER_PASSWORD = "/qwerty@!#:" - - -@fixture(scope="module") -def ops_manager_certs( - namespace: str, - multi_cluster_issuer: str, - central_cluster_client: kubernetes.client.ApiClient, -): - return create_ops_manager_tls_certs( - multi_cluster_issuer, - namespace, - "om-backup", - secret_name="mdb-om-backup-cert", - # We need the interconnected certificate since we update coreDNS later with that ip -> domain - # because our central cluster is not part of the mesh, but we can access the pods via external IPs. 
- # Since we are using TLS we need a certificate for a hostname, an IP does not work, hence - # f"om-backup.{namespace}.interconnected" -> IP setup below - additional_domains=[ - "fastdl.mongodb.org", - f"om-backup.{namespace}.interconnected", - ], - api_client=central_cluster_client, - ) - - -def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): - name = f"{mdb_name}-config" - data = { - "baseUrl": om.om_status().get_url(), - "projectName": project_name, - "sslMMSCAConfigMap": custom_ca, - "orgId": "", - } - - create_or_update_configmap(om.namespace, name, data, client) - - -def new_om_data_store( - mdb: MongoDB, - id: str, - assignment_enabled: bool = True, - user_name: Optional[str] = None, - password: Optional[str] = None, -) -> Dict: - return { - "id": id, - "uri": mdb.mongo_uri(user_name=user_name, password=password), - "ssl": mdb.is_tls_enabled(), - "assignmentEnabled": assignment_enabled, - } - - -@fixture(scope="module") -def ops_manager( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - custom_version: Optional[str], - custom_appdb_version: str, - ops_manager_certs: str, - central_cluster_client: kubernetes.client.ApiClient, -) -> MongoDBOpsManager: - resource: MongoDBOpsManager = MongoDBOpsManager.from_yaml( - yaml_fixture("om_ops_manager_backup.yaml"), namespace=namespace - ) - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource["spec"]["externalConnectivity"] = {"type": "LoadBalancer"} - resource["spec"]["security"] = { - "certsSecretPrefix": "mdb", - "tls": {"ca": multi_cluster_issuer_ca_configmap}, - } - # remove s3 config - del resource["spec"]["backup"]["s3Stores"] - - resource.set_version(custom_version) - resource.set_appdb_version(custom_appdb_version) - resource.allow_mdb_rc_versions() - resource.create_admin_secret(api_client=central_cluster_client) - - try_load(resource) - - return resource - - -@fixture(scope="module") -def oplog_replica_set( - ops_manager, - namespace, - custom_mdb_version: str, - central_cluster_client: kubernetes.client.ApiClient, - multi_cluster_issuer_ca_configmap: str, -) -> MongoDB: - resource = MongoDB.from_yaml( - yaml_fixture("replica-set-for-om.yaml"), - namespace=namespace, - name=OPLOG_RS_NAME, - ) - - create_project_config_map( - om=ops_manager, - project_name="development", - mdb_name=OPLOG_RS_NAME, - client=central_cluster_client, - custom_ca=multi_cluster_issuer_ca_configmap, - ) - - resource.configure(ops_manager, "development") - - resource["spec"]["opsManager"]["configMapRef"]["name"] = OPLOG_RS_NAME + "-config" - resource.set_version(custom_mdb_version) - - resource["spec"]["security"] = {"authentication": {"enabled": True, "modes": ["SCRAM"]}} - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - yield resource.update() - - -@fixture(scope="module") -def blockstore_replica_set( - ops_manager, - namespace, - custom_mdb_version: str, - central_cluster_client: kubernetes.client.ApiClient, - multi_cluster_issuer_ca_configmap: str, -) -> MongoDB: - resource = MongoDB.from_yaml( - yaml_fixture("replica-set-for-om.yaml"), - namespace=namespace, - name=BLOCKSTORE_RS_NAME, - ) - - create_project_config_map( - om=ops_manager, - project_name="blockstore", - mdb_name=BLOCKSTORE_RS_NAME, - client=central_cluster_client, - custom_ca=multi_cluster_issuer_ca_configmap, - ) - - resource.configure(ops_manager, "blockstore") - - resource.set_version(custom_mdb_version) - resource.api = 
kubernetes.client.CustomObjectsApi(central_cluster_client) - yield resource.update() - - -@fixture(scope="module") -def blockstore_user( - namespace, - blockstore_replica_set: MongoDB, - central_cluster_client: kubernetes.client.ApiClient, -) -> MongoDBUser: - """Creates a password secret and then the user referencing it""" - resource = MongoDBUser.from_yaml(yaml_fixture("scram-sha-user-backing-db.yaml"), namespace=namespace) - resource["spec"]["mongodbResourceRef"]["name"] = blockstore_replica_set.name - - print(f"\nCreating password for MongoDBUser {resource.name} in secret/{resource.get_secret_name()} ") - create_or_update_secret( - KubernetesTester.get_namespace(), - resource.get_secret_name(), - { - "password": USER_PASSWORD, - }, - api_client=central_cluster_client, - ) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - yield resource.update() - - -@fixture(scope="module") -def oplog_user( - namespace, - oplog_replica_set: MongoDB, - central_cluster_client: kubernetes.client.ApiClient, -) -> MongoDBUser: - """Creates a password secret and then the user referencing it""" - resource = MongoDBUser.from_yaml( - yaml_fixture("scram-sha-user-backing-db.yaml"), - namespace=namespace, - name="mms-user-2", - ) - resource["spec"]["mongodbResourceRef"]["name"] = oplog_replica_set.name - resource["spec"]["passwordSecretKeyRef"]["name"] = "mms-user-2-password" - resource["spec"]["username"] = "mms-user-2" - - print(f"\nCreating password for MongoDBUser {resource.name} in secret/{resource.get_secret_name()} ") - create_or_update_secret( - KubernetesTester.get_namespace(), - resource.get_secret_name(), - { - "password": USER_PASSWORD, - }, - api_client=central_cluster_client, - ) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - yield resource.update() - - -@mark.e2e_multi_cluster_backup_restore -def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() - - -@mark.e2e_multi_cluster_backup_restore -class TestOpsManagerCreation: - """ - name: Ops Manager successful creation with backup and oplog stores enabled - description: | - Creates an Ops Manager instance with backup enabled. 
The OM is expected to get to 'Pending' state - eventually as it will wait for oplog db to be created - """ - - def test_create_om( - self, - ops_manager: MongoDBOpsManager, - ): - ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() - ops_manager["spec"]["backup"]["members"] = 1 - - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Pending, - msg_regexp="The MongoDB object .+ doesn't exist", - timeout=1800, - ) - - def test_daemon_statefulset( - self, - ops_manager: MongoDBOpsManager, - ): - def stateful_set_becomes_ready(): - stateful_set = ops_manager.read_backup_statefulset() - return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1 - - KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300) - - stateful_set = ops_manager.read_backup_statefulset() - # pod template has volume mount request - assert (HEAD_PATH, "head") in ( - (mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts - ) - - def test_backup_daemon_services_created( - self, - namespace, - central_cluster_client: kubernetes.client.ApiClient, - ): - """Backup creates two additional services for queryable backup""" - services = client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items - - backup_services = [s for s in services if s.metadata.name.startswith("om-backup")] - - assert len(backup_services) >= 3 - - -@mark.e2e_multi_cluster_backup_restore -class TestBackupDatabasesAdded: - """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to - running state""" - - def test_backup_mdbs_created( - self, - oplog_replica_set: MongoDB, - blockstore_replica_set: MongoDB, - ): - """Creates mongodb databases all at once""" - oplog_replica_set.assert_reaches_phase(Phase.Running) - blockstore_replica_set.assert_reaches_phase(Phase.Running) - - def test_oplog_user_created(self, oplog_user: MongoDBUser): - oplog_user.assert_reaches_phase(Phase.Updated) - - def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): - """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" - ops_manager.backup_status().assert_reaches_phase( - Phase.Failed, - msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user " - "must be specified using 'mongodbUserRef'", - ) - - def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): - ops_manager.load() - ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Running, - timeout=200, - ignore_errors=True, - ) - - assert ops_manager.backup_status().get_message() is None - - -class TestBackupForMongodb: - @fixture(scope="module") - def base_url( - self, - ops_manager: MongoDBOpsManager, - ) -> str: - """ - The base_url makes OM accessible from member clusters via a special interconnected dns address. - This address only works for member clusters. 
- """ - interconnected_field = f"https://om-backup.{ops_manager.namespace}.interconnected" - new_address = f"{interconnected_field}:8443" - - return new_address - - @fixture(scope="module") - def project_one( - self, - ops_manager: MongoDBOpsManager, - namespace: str, - central_cluster_client: kubernetes.client.ApiClient, - base_url: str, - ) -> OMTester: - return ops_manager.get_om_tester( - project_name=f"{namespace}-project-one", - api_client=central_cluster_client, - base_url=base_url, - ) - - @fixture(scope="function") - def mdb_client(self, mongodb_multi_one: MongoDBMulti): - return pymongo.MongoClient( - mongodb_multi_one.tester(port=MONGODB_PORT).cnx_string, - **mongodb_multi_one.tester(port=MONGODB_PORT).default_opts, - readPreference="primary", # let's read from the primary and not stale data from the secondary - ) - - @fixture(scope="function") - def mongodb_multi_one_collection(self, mdb_client): - - # Ensure primary is available before proceeding - wait_for_primary(mdb_client) - - return mdb_client["testdb"]["testcollection"] - - @fixture(scope="module") - def mongodb_multi_one( - self, - ops_manager: MongoDBOpsManager, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: List[str], - base_url, - custom_mdb_version: str, - ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), - "multi-replica-set-one", - namespace, - # the project configmap should be created in the central cluster. - ).configure(ops_manager, f"{namespace}-project-one", api_client=central_cluster_client) - - resource.set_version(ensure_ent_version(custom_mdb_version)) - resource["spec"]["clusterSpecList"] = [ - {"clusterName": member_cluster_names[0], "members": 2}, - {"clusterName": member_cluster_names[1], "members": 1}, - {"clusterName": member_cluster_names[2], "members": 2}, - ] - - # creating a cluster with backup should work with custom ports - resource["spec"].update({"additionalMongodConfig": {"net": {"port": MONGODB_PORT}}}) - - resource.configure_backup(mode="enabled") - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - data = KubernetesTester.read_configmap( - namespace, "multi-replica-set-one-config", api_client=central_cluster_client - ) - KubernetesTester.delete_configmap(namespace, "multi-replica-set-one-config", api_client=central_cluster_client) - data["baseUrl"] = base_url - data["sslMMSCAConfigMap"] = multi_cluster_issuer_ca_configmap - create_or_update_configmap( - namespace, - "multi-replica-set-one-config", - data, - api_client=central_cluster_client, - ) - - return resource.update() - - @mark.e2e_multi_cluster_backup_restore - def test_setup_om_connection( - self, - ops_manager: MongoDBOpsManager, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_clients: List[MultiClusterClient], - ): - """ - The base_url makes OM accessible from member clusters via a special interconnected dns address. - """ - ops_manager.load() - external_svc_name = ops_manager.external_svc_name() - svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client) - # we have no hostName, but the ip is resolvable. - ip = svc.status.load_balancer.ingress[0].ip - - interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected" - - # let's make sure that every client can connect to OM. 
- for c in member_cluster_clients: - update_coredns_hosts( - host_mappings=[(ip, interconnected_field)], - api_client=c.api_client, - cluster_name=c.cluster_name, - ) - - # let's make sure that the operator can connect to OM via that given address. - update_coredns_hosts( - host_mappings=[(ip, interconnected_field)], - api_client=central_cluster_client, - cluster_name="central-cluster", - ) - - new_address = f"https://{interconnected_field}:8443" - # updating the central url app setting to point at the external address, - # this allows agents in other clusters to communicate correctly with this OM instance. - ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address - ops_manager.update() - - @mark.e2e_multi_cluster_backup_restore - def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): - # we might fail connection in the beginning since we set a custom dns in coredns - mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1200) - - @skip_if_local - @mark.e2e_multi_cluster_backup_restore - @pytest.mark.flaky(reruns=100, reruns_delay=6) - def test_add_test_data(self, mongodb_multi_one_collection): - mongodb_multi_one_collection.insert_one(TEST_DATA) - - @mark.e2e_multi_cluster_backup_restore - def test_mdb_backed_up(self, project_one: OMTester): - project_one.wait_until_backup_snapshots_are_ready(expected_count=1) - - @mark.e2e_multi_cluster_backup_restore - def test_change_mdb_data(self, mongodb_multi_one_collection): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - time.sleep(30) - mongodb_multi_one_collection.insert_one({"foo": "bar"}) - - @mark.e2e_multi_cluster_backup_restore - def test_pit_restore(self, project_one: OMTester): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - - backup_completion_time = project_one.get_latest_backup_completion_time() - print("\nbackup_completion_time: {}".format(backup_completion_time)) - - pit_millis = backup_completion_time + 1500 - - print(f"Restoring back to: {pit_millis}") - - project_one.create_restore_job_pit(pit_millis) - - @mark.e2e_multi_cluster_backup_restore - def test_data_got_restored(self, mongodb_multi_one_collection, mdb_client): - assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200) - - -def time_to_millis(date_time) -> int: - """https://stackoverflow.com/a/11111177/614239""" - epoch = datetime.datetime.utcfromtimestamp(0) - pit_millis = (date_time - epoch).total_seconds() * 1000 - return pit_millis diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/__init__.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py new file mode 100644 index 000000000..3679de73d --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py @@ -0,0 +1,22 @@ +from typing import List + +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def 
test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi( + mongodb_multi: MongoDBMulti | MongoDB, + namespace: str, + server_certs: str, + multi_cluster_issuer_ca_configmap: str, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], +): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2400) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_clusterwide_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_clusterwide_replicaset.py new file mode 100644 index 000000000..a786f9951 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_clusterwide_replicaset.py @@ -0,0 +1,121 @@ +from typing import Dict, List + +import kubernetes +from kubetester import ( + create_or_update_configmap, + create_or_update_secret, + read_configmap, + read_secret, +) +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster import prepare_multi_cluster_namespaces +from tests.multicluster.conftest import create_namespace + + +def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]): + clients = cluster_clients + + assert len(clients) == 2 + assert member_cluster_names[0] in clients + assert member_cluster_names[1] in clients + + +def test_create_namespaces( + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + multi_cluster_operator_installation_config: Dict[str, str], +): + image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"] + image_pull_secret_data = read_secret(namespace, image_pull_secret_name, api_client=central_cluster_client) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + mdba_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + mdbb_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + +def test_deploy_operator(multi_cluster_operator_clustermode: Operator): + multi_cluster_operator_clustermode.assert_is_running() + + +def test_prepare_namespace( + multi_cluster_operator_installation_config: Dict[str, str], + member_cluster_clients: List[MultiClusterClient], + central_cluster_name: str, + mdba_ns: str, + mdbb_ns: str, +): + prepare_multi_cluster_namespaces( + mdba_ns, + multi_cluster_operator_installation_config, + member_cluster_clients, + central_cluster_name, + skip_central_cluster=False, + ) + + prepare_multi_cluster_namespaces( + mdbb_ns, + multi_cluster_operator_installation_config, + member_cluster_clients, + central_cluster_name, + skip_central_cluster=False, + ) + + +def test_copy_configmap_and_secret_across_ns( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_operator_installation_config: Dict[str, str], + mdba_ns: str, + mdbb_ns: str, +): + data = read_configmap(namespace, "my-project", api_client=central_cluster_client) + data["projectName"] = mdba_ns + create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client) + + data["projectName"] 
= mdbb_ns
+    create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client)
+
+    data = read_secret(namespace, "my-credentials", api_client=central_cluster_client)
+    create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client)
+    create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client)
+
+
+def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDBMulti | MongoDB):
+    mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800)
+
+
+def test_enable_mongodb_multi_nsa_auth(mongodb_multi_a: MongoDBMulti | MongoDB):
+    mongodb_multi_a.reload()
+    # the authentication block is a plain dict (a trailing comma here would turn it into a tuple)
+    mongodb_multi_a["spec"]["authentication"] = {
+        "agents": {"mode": "SCRAM"},
+        "enabled": True,
+        "modes": ["SCRAM"],
+    }
+    # push the spec change; without update() the new authentication settings never reach the API server
+    mongodb_multi_a.update()
+
+
+def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti | MongoDB):
+    mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800)
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_replicaset.py
new file mode 100644
index 000000000..402c69947
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_replicaset.py
@@ -0,0 +1,43 @@
+from typing import Dict, List
+
+from kubetester.mongodb import MongoDB
+from kubetester.mongodb_multi import MongoDBMulti
+from kubetester.mongotester import with_tls
+from kubetester.multicluster_client import MultiClusterClient
+from kubetester.operator import Operator
+from kubetester.phase import Phase
+
+
+def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]):
+    clients = cluster_clients
+
+    assert len(clients) == 2
+    assert member_cluster_names[0] in clients
+    assert member_cluster_names[1] in clients
+
+
+def test_deploy_operator(multi_cluster_operator: Operator):
+    multi_cluster_operator.assert_is_running()
+
+
+def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB):
+    mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200)
+
+
+def test_statefulset_is_created_across_multiple_clusters(
+    mongodb_multi: MongoDBMulti | MongoDB,
+    member_cluster_clients: List[MultiClusterClient],
+):
+    statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients)
+    cluster_one_client = member_cluster_clients[0]
+    cluster_one_sts = statefulsets[cluster_one_client.cluster_name]
+    assert cluster_one_sts.status.ready_replicas == 2
+
+    cluster_two_client = member_cluster_clients[1]
+    cluster_two_sts = statefulsets[cluster_two_client.cluster_name]
+    assert cluster_two_sts.status.ready_replicas == 1
+
+
+def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str):
+    tester = mongodb_multi.tester()
+    tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)])
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_agent_flags.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_agent_flags.py
similarity index 71%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_agent_flags.py
rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_agent_flags.py
index 1a552f39c..5b887aa53 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_agent_flags.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_agent_flags.py
@@ -1,43 +1,19 @@
 from typing import List
 
-import kubernetes
 from kubetester import client
from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.common.placeholders import placeholders -from tests.multicluster.conftest import cluster_spec_list -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-cluster.yaml"), "multi-replica-set", namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - # override agent startup flags - resource["spec"]["agent"] = {"startupOptions": {"logFile": "/var/log/mongodb-mms-automation/customLogFile"}} - resource["spec"]["agent"]["logLevel"] = "DEBUG" - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.update() - - -@mark.e2e_multi_cluster_agent_flags -def test_create_mongodb_multi(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@mark.e2e_multi_cluster_agent_flags def test_multi_replicaset_has_agent_flags( namespace: str, member_cluster_clients: List[MultiClusterClient], @@ -58,10 +34,9 @@ def test_multi_replicaset_has_agent_flags( assert result != "0" -@mark.e2e_multi_cluster_agent_flags def test_placeholders_in_external_services( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for cluster_spec_item in mongodb_multi["spec"]["clusterSpecList"]: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_automated_disaster_recovery.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_automated_disaster_recovery.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_automated_disaster_recovery.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_automated_disaster_recovery.py index 57928b907..c7511d639 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_automated_disaster_recovery.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_automated_disaster_recovery.py @@ -5,39 +5,20 @@ from kubernetes import client from kubetester import delete_statefulset, statefulset_is_deleted from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import run_periodically +from kubetester.kubetester import KubernetesTester, run_periodically +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.conftest import get_member_cluster_api_client - -from .conftest import cluster_spec_list, 
create_service_entries_objects +from tests.multicluster.conftest import ( + create_service_entries_objects, +) FAILED_MEMBER_CLUSTER_NAME = "kind-e2e-cluster-3" -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return resource - - -@mark.e2e_multi_cluster_disaster_recovery def test_label_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): api = client.CoreV1Api(api_client=central_cluster_client) @@ -48,26 +29,20 @@ def test_label_namespace(namespace: str, central_cluster_client: kubernetes.clie api.replace_namespace(name=namespace, body=ns) -@mark.e2e_multi_cluster_disaster_recovery def test_create_service_entry(service_entries: List[CustomObject]): for service_entry in service_entries: service_entry.update() -@mark.e2e_multi_cluster_disaster_recovery -@mark.e2e_multi_cluster_multi_disaster_recovery def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@mark.e2e_multi_cluster_disaster_recovery -@mark.e2e_multi_cluster_multi_disaster_recovery -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@mark.e2e_multi_cluster_disaster_recovery def test_update_service_entry_block_failed_cluster_traffic( namespace: str, central_cluster_client: kubernetes.client.ApiClient, @@ -82,16 +57,16 @@ def test_update_service_entry_block_failed_cluster_traffic( service_entry.update() -@mark.e2e_multi_cluster_disaster_recovery def test_mongodb_multi_leaves_running_state( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, ): mongodb_multi.load() mongodb_multi.assert_abandons_phase(Phase.Running, timeout=300) -@mark.e2e_multi_cluster_disaster_recovery -def test_delete_database_statefulset_in_failed_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: list[str]): +def test_delete_database_statefulset_in_failed_cluster( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: list[str] +): failed_cluster_idx = member_cluster_names.index(FAILED_MEMBER_CLUSTER_NAME) sts_name = f"{mongodb_multi.name}-{failed_cluster_idx}" try: @@ -115,22 +90,17 @@ def test_delete_database_statefulset_in_failed_cluster(mongodb_multi: MongoDBMul ) -@mark.e2e_multi_cluster_disaster_recovery -@mark.e2e_multi_cluster_multi_disaster_recovery -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity() -@mark.e2e_multi_cluster_disaster_recovery -def test_replica_reaches_running(mongodb_multi: MongoDBMulti): +def test_replica_reaches_running(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@mark.e2e_multi_cluster_disaster_recovery -@mark.e2e_multi_cluster_multi_disaster_recovery -def test_number_numbers_in_ac(mongodb_multi: MongoDBMulti): +def 
test_number_numbers_in_ac(mongodb_multi: MongoDBMulti | MongoDB): tester = AutomationConfigTester(KubernetesTester.get_automation_config()) desiredmembers = 0 for c in mongodb_multi["spec"]["clusterSpecList"]: @@ -140,9 +110,8 @@ def test_number_numbers_in_ac(mongodb_multi: MongoDBMulti): assert len(processes) == desiredmembers -@mark.e2e_multi_cluster_disaster_recovery def test_sts_count_in_member_cluster( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: list[str], member_cluster_clients: List[MultiClusterClient], ): diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py new file mode 100644 index 000000000..f1eec9b20 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py @@ -0,0 +1,214 @@ +import datetime +import time +from typing import List + +import kubernetes +import kubernetes.client +from kubernetes import client +from kubetester import ( + create_or_update_configmap, + get_default_storage_class, + read_service, +) +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.omtester import OMTester +from kubetester.operator import Operator +from kubetester.opsmanager import MongoDBOpsManager +from kubetester.phase import Phase +from tests.conftest import ( + assert_data_got_restored, + update_coredns_hosts, +) + +TEST_DATA = {"_id": "unique_id", "name": "John", "address": "Highway 37", "age": 30} + +MONGODB_PORT = 30000 + +HEAD_PATH = "/head/" +OPLOG_RS_NAME = "my-mongodb-oplog" +BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" +USER_PASSWORD = "/qwerty@!#:" + + +def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): + name = f"{mdb_name}-config" + data = { + "baseUrl": om.om_status().get_url(), + "projectName": project_name, + "sslMMSCAConfigMap": custom_ca, + "orgId": "", + } + + create_or_update_configmap(om.namespace, name, data, client) + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +class TestOpsManagerCreation: + """ + name: Ops Manager successful creation with backup and oplog stores enabled + description: | + Creates an Ops Manager instance with backup enabled. 
The OM is expected to get to the 'Pending' state
+      eventually, as it will wait for the oplog db to be created
+    """
+
+    def test_create_om(
+        self,
+        ops_manager: MongoDBOpsManager,
+    ):
+        ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class()
+        ops_manager["spec"]["backup"]["members"] = 1
+
+        ops_manager.update()
+
+        ops_manager.backup_status().assert_reaches_phase(
+            Phase.Pending,
+            msg_regexp="The MongoDB object .+ doesn't exist",
+            timeout=1800,
+        )
+
+    def test_daemon_statefulset(
+        self,
+        ops_manager: MongoDBOpsManager,
+    ):
+        def stateful_set_becomes_ready():
+            stateful_set = ops_manager.read_backup_statefulset()
+            return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1
+
+        KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300)
+
+        stateful_set = ops_manager.read_backup_statefulset()
+        # pod template has volume mount request
+        assert (HEAD_PATH, "head") in (
+            (mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts
+        )
+
+    def test_backup_daemon_services_created(
+        self,
+        namespace,
+        central_cluster_client: kubernetes.client.ApiClient,
+    ):
+        """Backup creates two additional services for queryable backup"""
+        services = client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items
+
+        backup_services = [s for s in services if s.metadata.name.startswith("om-backup")]
+
+        assert len(backup_services) >= 3
+
+
+class TestBackupDatabasesAdded:
+    """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to
+    running state"""
+
+    def test_backup_mdbs_created(
+        self,
+        oplog_replica_set: MongoDB,
+        blockstore_replica_set: MongoDB,
+    ):
+        """Creates mongodb databases all at once"""
+        oplog_replica_set.assert_reaches_phase(Phase.Running)
+        blockstore_replica_set.assert_reaches_phase(Phase.Running)
+
+    def test_oplog_user_created(self, oplog_user: MongoDBUser):
+        oplog_user.assert_reaches_phase(Phase.Updated)
+
+    def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager):
+        """Waits until Backup is in failed state as blockstore doesn't have reference to the user"""
+        ops_manager.backup_status().assert_reaches_phase(
+            Phase.Failed,
+            msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user "
+            "must be specified using 'mongodbUserRef'",
+        )
+
+    def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser):
+        ops_manager.load()
+        ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name}
+        ops_manager.update()
+
+        ops_manager.backup_status().assert_reaches_phase(
+            Phase.Running,
+            timeout=200,
+            ignore_errors=True,
+        )
+
+        assert ops_manager.backup_status().get_message() is None
+
+
+class TestBackupForMongodb:
+    def test_setup_om_connection(
+        self,
+        ops_manager: MongoDBOpsManager,
+        central_cluster_client: kubernetes.client.ApiClient,
+        member_cluster_clients: List[MultiClusterClient],
+    ):
+        """
+        Makes OM accessible from member clusters via a special interconnected dns address.
+        """
+        ops_manager.load()
+        external_svc_name = ops_manager.external_svc_name()
+        svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client)
+        # we have no hostName, but the ip is resolvable.
+        ip = svc.status.load_balancer.ingress[0].ip
+
+        interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected"
+
+        # let's make sure that every client can connect to OM.
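+        # update_coredns_hosts (from tests.conftest) rewrites the hosts entries in the
+        # target cluster's CoreDNS config so the interconnected hostname resolves to
+        # the OM load-balancer IP.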
+        for c in member_cluster_clients:
+            update_coredns_hosts(
+                host_mappings=[(ip, interconnected_field)],
+                api_client=c.api_client,
+                cluster_name=c.cluster_name,
+            )
+
+        # let's make sure that the operator can connect to OM via that given address.
+        update_coredns_hosts(
+            host_mappings=[(ip, interconnected_field)],
+            api_client=central_cluster_client,
+            cluster_name="central-cluster",
+        )
+
+        new_address = f"https://{interconnected_field}:8443"
+        # updating the central url app setting to point at the external address
+        # allows agents in other clusters to communicate correctly with this OM instance.
+        ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address
+        ops_manager.update()
+
+    def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti | MongoDB):
+        # we might fail connection in the beginning since we set a custom dns in coredns
+        mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1200)
+
+    def test_add_test_data(self, mongodb_multi_one_collection):
+        mongodb_multi_one_collection.insert_one(TEST_DATA)
+
+    def test_mdb_backed_up(self, project_one: OMTester):
+        project_one.wait_until_backup_snapshots_are_ready(expected_count=1)
+
+    def test_change_mdb_data(self, mongodb_multi_one_collection):
+        now_millis = time_to_millis(datetime.datetime.now())
+        print("\nCurrent time (millis): {}".format(now_millis))
+        time.sleep(30)
+        mongodb_multi_one_collection.insert_one({"foo": "bar"})
+
+    def test_pit_restore(self, project_one: OMTester):
+        now_millis = time_to_millis(datetime.datetime.now())
+        print("\nCurrent time (millis): {}".format(now_millis))
+
+        backup_completion_time = project_one.get_latest_backup_completion_time()
+        print("\nbackup_completion_time: {}".format(backup_completion_time))
+
+        pit_millis = backup_completion_time + 1500
+
+        print(f"Restoring back to: {pit_millis}")
+
+        project_one.create_restore_job_pit(pit_millis)
+
+    def test_data_got_restored(self, mongodb_multi_one_collection, mdb_client):
+        assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200)
+
+
+def time_to_millis(date_time) -> int:
+    """https://stackoverflow.com/a/11111177/614239"""
+    epoch = datetime.datetime.utcfromtimestamp(0)
+    pit_millis = (date_time - epoch).total_seconds() * 1000
+    return pit_millis
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py
new file mode 100644
index 000000000..68bbaa38d
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py
@@ -0,0 +1,258 @@
+# This test sets up ops manager in a multicluster "no-mesh" environment.
+# It tests the backup functionality with a multi-cluster replica set when the replica set is deployed outside of a service-mesh context.
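+# High-level flow: remap the externally exposed hosts via CoreDNS so every cluster can
+# resolve them, deploy OM with backup enabled, deploy the replica set, and then verify
+# snapshot creation and a point-in-time restore.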
+ +import datetime +import time +from typing import List, Optional, Tuple + +import kubernetes +import kubernetes.client +from kubernetes import client +from kubetester import ( + create_or_update_configmap, + get_default_storage_class, + read_service, +) +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.omtester import OMTester +from kubetester.operator import Operator +from kubetester.opsmanager import MongoDBOpsManager +from kubetester.phase import Phase +from tests.conftest import assert_data_got_restored, update_coredns_hosts + +TEST_DATA = {"_id": "unique_id", "name": "John", "address": "Highway 37", "age": 30} + +HEAD_PATH = "/head/" + + +def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): + name = f"{mdb_name}-config" + data = { + "baseUrl": om.om_status().get_url(), + "projectName": project_name, + "sslMMSCAConfigMap": custom_ca, + "orgId": "", + } + + create_or_update_configmap(om.namespace, name, data, client) + + +def new_om_data_store( + mdb: MongoDB, + id: str, + assignment_enabled: bool = True, + user_name: Optional[str] = None, + password: Optional[str] = None, +) -> dict: + return { + "id": id, + "uri": mdb.mongo_uri(user_name=user_name, password=password), + "ssl": mdb.is_tls_enabled(), + "assignmentEnabled": assignment_enabled, + } + + +def test_update_coredns( + replica_set_external_hosts: List[Tuple[str, str]], + cluster_clients: dict[str, kubernetes.client.ApiClient], +): + """ + This test updates the coredns config in the member clusters to allow connecting to the other replica set members + through an external address. + """ + for cluster_name, cluster_api in cluster_clients.items(): + update_coredns_hosts(replica_set_external_hosts, cluster_name, api_client=cluster_api) + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +class TestOpsManagerCreation: + """ + name: Ops Manager successful creation with backup and oplog stores enabled + description: | + Creates an Ops Manager instance with backup enabled. 
The OM is expected to get to 'Pending' state + eventually as it will wait for oplog db to be created + """ + + def test_create_om( + self, + ops_manager: MongoDBOpsManager, + ): + ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() + ops_manager["spec"]["backup"]["members"] = 1 + + ops_manager.update() + + ops_manager.backup_status().assert_reaches_phase( + Phase.Pending, + msg_regexp="The MongoDB object .+ doesn't exist", + timeout=1800, + ) + + def test_daemon_statefulset( + self, + ops_manager: MongoDBOpsManager, + ): + def stateful_set_becomes_ready(): + stateful_set = ops_manager.read_backup_statefulset() + return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1 + + KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300) + + stateful_set = ops_manager.read_backup_statefulset() + # pod template has volume mount request + assert (HEAD_PATH, "head") in ( + (mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts + ) + + def test_backup_daemon_services_created( + self, + namespace, + central_cluster_client: kubernetes.client.ApiClient, + ): + """Backup creates two additional services for queryable backup""" + services = client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items + + backup_services = [s for s in services if s.metadata.name.startswith("om-backup")] + + assert len(backup_services) >= 3 + + +class TestBackupDatabasesAdded: + """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to + running state""" + + def test_backup_mdbs_created( + self, + oplog_replica_set: MongoDB, + blockstore_replica_set: MongoDB, + ): + """Creates mongodb databases all at once""" + oplog_replica_set.assert_reaches_phase(Phase.Running) + blockstore_replica_set.assert_reaches_phase(Phase.Running) + + def test_oplog_user_created(self, oplog_user: MongoDBUser): + oplog_user.assert_reaches_phase(Phase.Updated) + + def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): + """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" + ops_manager.backup_status().assert_reaches_phase( + Phase.Failed, + msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user " + "must be specified using 'mongodbUserRef'", + ) + + def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): + ops_manager.load() + ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} + ops_manager.update() + + ops_manager.backup_status().assert_reaches_phase( + Phase.Running, + timeout=200, + ignore_errors=True, + ) + + assert ops_manager.backup_status().get_message() is None + + +class TestBackupForMongodb: + + def test_setup_om_connection( + self, + replica_set_external_hosts: List[Tuple[str, str]], + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + ): + """ + test_setup_om_connection makes OM accessible from member clusters via a special interconnected dns address. + """ + ops_manager.load() + external_svc_name = ops_manager.external_svc_name() + svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client) + # we have no hostName, but the ip is resolvable. 
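+        # assumes the OM service is a LoadBalancer with an ingress IP already
+        # provisioned; svc.status.load_balancer.ingress would be empty otherwise.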
+        ip = svc.status.load_balancer.ingress[0].ip
+
+        interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected"
+
+        # let's make sure that every client can connect to OM.
+        hosts = replica_set_external_hosts[:]
+        hosts.append((ip, interconnected_field))
+
+        for c in member_cluster_clients:
+            update_coredns_hosts(
+                host_mappings=hosts,
+                api_client=c.api_client,
+                cluster_name=c.cluster_name,
+            )
+
+        # let's make sure that the operator can connect to OM via that given address.
+        update_coredns_hosts(
+            host_mappings=[(ip, interconnected_field)],
+            api_client=central_cluster_client,
+            cluster_name="central-cluster",
+        )
+
+        new_address = f"https://{interconnected_field}:8443"
+        # updating the central url app setting to point at the external address
+        # allows agents in other clusters to communicate correctly with this OM instance.
+        ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address
+        ops_manager.update()
+
+    def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti | MongoDB):
+        # we might fail connection in the beginning since we set a custom dns in coredns
+        mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1500)
+
+    def test_add_test_data(self, mongodb_multi_one_collection):
+        max_attempts = 100
+        while max_attempts > 0:
+            try:
+                mongodb_multi_one_collection.insert_one(TEST_DATA)
+                return
+            except Exception as e:
+                print(e)
+                max_attempts -= 1
+                time.sleep(6)
+        raise RuntimeError("could not insert test data after 100 attempts")
+
+    def test_mdb_backed_up(self, project_one: OMTester):
+        project_one.wait_until_backup_snapshots_are_ready(expected_count=1)
+
+    def test_change_mdb_data(self, mongodb_multi_one_collection):
+        now_millis = time_to_millis(datetime.datetime.now())
+        print("\nCurrent time (millis): {}".format(now_millis))
+        time.sleep(30)
+        mongodb_multi_one_collection.insert_one({"foo": "bar"})
+
+    def test_pit_restore(self, project_one: OMTester):
+        now_millis = time_to_millis(datetime.datetime.now())
+        print("\nCurrent time (millis): {}".format(now_millis))
+
+        pit_datetime = datetime.datetime.now() - datetime.timedelta(seconds=15)
+        pit_millis = time_to_millis(pit_datetime)
+        print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis))
+
+        project_one.create_restore_job_pit(pit_millis)
+
+    def test_mdb_ready(self, mongodb_multi_one: MongoDBMulti | MongoDB):
+        # Note: we are not waiting for the restore jobs to finish, as PIT restore jobs
+        # get FINISHED status right away.
+        # But the agent might still do work on the cluster, so we need to wait for that to happen.
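+        # the restore briefly drives the resource out of Running, so wait for the
+        # Pending -> Running transition instead of asserting Running immediately.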
+ mongodb_multi_one.assert_reaches_phase(Phase.Pending) + mongodb_multi_one.assert_reaches_phase(Phase.Running) + + def test_data_got_restored(self, mongodb_multi_one_collection): + assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=900) + + +def time_to_millis(date_time) -> int: + """https://stackoverflow.com/a/11111177/614239""" + epoch = datetime.datetime.utcfromtimestamp(0) + pit_millis = (date_time - epoch).total_seconds() * 1000 + return pit_millis diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py new file mode 100644 index 000000000..7fdb9bdc1 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py @@ -0,0 +1,80 @@ +from typing import Callable, List + +import kubernetes +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.conftest import ( + run_kube_config_creation_tool, + run_multi_cluster_recovery_tool, +) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME + +def test_deploy_operator( + install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], + member_cluster_names: List[str], + namespace: str, +): + run_kube_config_creation_tool(member_cluster_names[:-1], namespace, namespace, member_cluster_names) + # deploy the operator without the final cluster + operator = install_multi_cluster_operator_set_members_fn(member_cluster_names[:-1]) + operator.assert_is_running() + + +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + + +def test_recover_operator_add_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, +): + return_code = run_multi_cluster_recovery_tool(member_cluster_names, namespace, namespace) + assert return_code == 0 + operator = Operator( + name=MULTI_CLUSTER_OPERATOR_NAME, + namespace=namespace, + api_client=central_cluster_client, + ) + operator._wait_for_operator_ready() + operator.assert_is_running() + + +def test_mongodb_multi_recovers_adding_cluster(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: List[str]): + mongodb_multi.load() + + mongodb_multi["spec"]["clusterSpecList"].append({"clusterName": member_cluster_names[-1], "members": 2}) + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + + +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, +): + return_code = run_multi_cluster_recovery_tool(member_cluster_names[1:], namespace, namespace) + assert return_code == 0 + operator = Operator( + name=MULTI_CLUSTER_OPERATOR_NAME, + namespace=namespace, + api_client=central_cluster_client, + ) + operator._wait_for_operator_ready() + operator.assert_is_running() + + +def test_mongodb_multi_recovers_removing_cluster( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: List[str] +): + mongodb_multi.load() + + last_transition_time = mongodb_multi.get_status_last_transition_time() + + mongodb_multi["spec"]["clusterSpecList"].pop(0) + mongodb_multi.update() + mongodb_multi.assert_state_transition_happens(last_transition_time) + + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_clusterwide.py
new file mode 100644
index 000000000..3ab1c88b1
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_clusterwide.py
@@ -0,0 +1,123 @@
+import time
+from typing import Dict, List
+
+import kubernetes
+from kubernetes import client
+from kubetester import create_or_update_configmap, create_or_update_secret, read_secret
+from kubetester.kubetester import KubernetesTester
+from kubetester.mongodb import MongoDB
+from kubetester.mongodb_multi import MongoDBMulti
+from kubetester.multicluster_client import MultiClusterClient
+from kubetester.operator import Operator
+from kubetester.phase import Phase
+from tests.multicluster import prepare_multi_cluster_namespaces
+from tests.multicluster.conftest import create_namespace
+
+
+def test_create_namespaces(
+    namespace: str,
+    mdba_ns: str,
+    mdbb_ns: str,
+    unmanaged_mdb_ns: str,
+    central_cluster_client: kubernetes.client.ApiClient,
+    member_cluster_clients: List[MultiClusterClient],
+    evergreen_task_id: str,
+    multi_cluster_operator_installation_config: Dict[str, str],
+):
+    image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"]
+    image_pull_secret_data = read_secret(namespace, image_pull_secret_name, api_client=central_cluster_client)
+
+    create_namespace(
+        central_cluster_client,
+        member_cluster_clients,
+        evergreen_task_id,
+        mdba_ns,
+        image_pull_secret_name,
+        image_pull_secret_data,
+    )
+
+    create_namespace(
+        central_cluster_client,
+        member_cluster_clients,
+        evergreen_task_id,
+        mdbb_ns,
+        image_pull_secret_name,
+        image_pull_secret_data,
+    )
+
+    create_namespace(
+        central_cluster_client,
+        member_cluster_clients,
+        evergreen_task_id,
+        unmanaged_mdb_ns,
+        image_pull_secret_name,
+        image_pull_secret_data,
+    )
+
+
+def test_prepare_namespace(
+    multi_cluster_operator_installation_config: Dict[str, str],
+    member_cluster_clients: List[MultiClusterClient],
+    central_cluster_name: str,
+    mdba_ns: str,
+    mdbb_ns: str,
+):
+    prepare_multi_cluster_namespaces(
+        mdba_ns,
+        multi_cluster_operator_installation_config,
+        member_cluster_clients,
+        central_cluster_name,
+    )
+
+    prepare_multi_cluster_namespaces(
+        mdbb_ns,
+        multi_cluster_operator_installation_config,
+        member_cluster_clients,
+        central_cluster_name,
+    )
+
+
+def test_deploy_operator(multi_cluster_operator_clustermode: Operator):
+    multi_cluster_operator_clustermode.assert_is_running()
+
+
+def test_deploy_namespaced_operator(install_operator: Operator):
+    install_operator.assert_is_running()
+
+
+def test_copy_configmap_and_secret_across_ns(
+    namespace: str,
+    central_cluster_client: client.ApiClient,
+    multi_cluster_operator_installation_config: Dict[str, str],
+    mdba_ns: str,
+    mdbb_ns: str,
+):
+    data = KubernetesTester.read_configmap(namespace, "my-project", api_client=central_cluster_client)
+    data["projectName"] = mdba_ns
+    create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client)
+
+    data["projectName"] = mdbb_ns
+    create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client)
+
+    data = read_secret(namespace, "my-credentials", api_client=central_cluster_client)
+    create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client)
+    create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client)
+
+
+def test_create_mongodb_multi_nsa(mongodb_multi_a:
MongoDBMulti | MongoDB): + mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti | MongoDB): + mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_create_mongodb_multi_unmanaged(unmanaged_mongodb_multi: MongoDBMulti | MongoDB): + """ + For an unmanaged resource, the status should not be updated! + """ + for i in range(10): + time.sleep(5) + + unmanaged_mongodb_multi.reload() + assert "status" not in unmanaged_mongodb_multi diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_dr_connect.py similarity index 55% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_dr_connect.py index c2cc0d988..bb7d7d467 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_dr_connect.py @@ -2,9 +2,7 @@ import time from typing import Dict -import kubernetes -import pytest -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from kubetester.phase import Phase @@ -14,52 +12,28 @@ CLUSTER_TO_DELETE = "member-3a" -# this test is intended to run locally, using telepresence. Make sure to configure the cluster_context to api-server mapping -# in the "cluster_host_mapping" fixture before running it. It is intented to be run locally with the command: make e2e-telepresence test=e2e_multi_cluster_dr local=true -@pytest.fixture(scope="module") -def mongodb_multi(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-dr.yaml"), "multi-replica-set", namespace) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - # return resource.load() - return resource.create() - - -@pytest.fixture(scope="module") -def mongodb_multi_collection(mongodb_multi: MongoDBMulti): - collection = mongodb_multi.tester().client["testdb"] - return collection["testcollection"] - - -@pytest.mark.e2e_multi_cluster_dr def test_create_kube_config_file(cluster_clients: Dict): clients = cluster_clients assert len(clients) == 4 -@pytest.mark.e2e_multi_cluster_dr def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_dr -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) -@pytest.mark.e2e_multi_cluster_dr -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity() -@pytest.mark.e2e_multi_cluster_dr -@pytest.mark.flaky(reruns=100, reruns_delay=6) def test_add_test_data(mongodb_multi_collection): mongodb_multi_collection.insert_one(TEST_DATA) -@pytest.mark.e2e_multi_cluster_dr def test_delete_member_3_cluster(): # delete 3rd cluster with gcloud command # gcloud container clusters delete member-3a --zone us-west1-a @@ -79,13 +53,11 @@ def test_delete_member_3_cluster(): ) 
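+
+# after the member cluster is gone, the remaining members should still elect a
+# primary, so the replica set must stay reachable and keep accepting writes.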
-@pytest.mark.e2e_multi_cluster_dr
-def test_replica_set_is_reachable_after_deletetion(mongodb_multi: MongoDBMulti):
+def test_replica_set_is_reachable_after_deletion(mongodb_multi: MongoDBMulti | MongoDB):
     tester = mongodb_multi.tester()
     tester.assert_connectivity()
 
 
-@pytest.mark.e2e_multi_cluster_dr
 def test_add_test_data_after_deletion(mongodb_multi_collection, capsys):
     max_attempts = 100
     while max_attempts > 0:
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_enable_tls.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_enable_tls.py
new file mode 100644
index 000000000..3f83581e8
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_enable_tls.py
@@ -0,0 +1,46 @@
+from typing import List
+
+from kubetester import read_secret
+from kubetester.mongodb import MongoDB
+from kubetester.mongodb_multi import MongoDBMulti
+from kubetester.multicluster_client import MultiClusterClient
+from kubetester.operator import Operator
+from kubetester.phase import Phase
+
+CERT_SECRET_PREFIX = "clustercert"
+MDB_RESOURCE = "multi-cluster-replica-set"
+BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem"
+
+
+def test_deploy_operator(multi_cluster_operator: Operator):
+    multi_cluster_operator.assert_is_running()
+
+
+def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB, namespace: str):
+    mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200)
+
+
+def test_enabled_tls_mongodb_multi(
+    mongodb_multi: MongoDBMulti | MongoDB,
+    namespace: str,
+    server_certs: str,
+    multi_cluster_issuer_ca_configmap: str,
+    member_cluster_clients: List[MultiClusterClient],
+):
+    mongodb_multi.load()
+    mongodb_multi["spec"]["security"] = {
+        "certsSecretPrefix": CERT_SECRET_PREFIX,
+        "tls": {
+            "ca": multi_cluster_issuer_ca_configmap,
+        },
+    }
+    mongodb_multi.update()
+    mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1300)
+
+    # assert the presence of the generated pem certificates in each member cluster
+    for client in member_cluster_clients:
+        read_secret(
+            namespace=namespace,
+            name=BUNDLE_PEM_SECRET_NAME,
+            api_client=client.api_client,
+        )
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap.py
new file mode 100644
index 000000000..edae362ed
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap.py
@@ -0,0 +1,163 @@
+from kubetester import wait_until
+from kubetester.automation_config_tester import AutomationConfigTester
+from kubetester.kubetester import KubernetesTester
+from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM
+from kubetester.mongodb import MongoDB
+from kubetester.mongodb_multi import MongoDBMulti
+from kubetester.mongodb_user import MongoDBUser
+from kubetester.operator import Operator
+from kubetester.phase import Phase
+from tests.multicluster.conftest import cluster_spec_list
+
+
+def test_deploy_operator(multi_cluster_operator: Operator):
+    multi_cluster_operator.assert_is_running()
+
+
+def test_mongodb_multi_pending(mongodb_multi: MongoDBMulti | MongoDB):
+    """
+    This function tests CLOUDP-229222. The resource needs to enter the "Pending" state; without the automatic
+    recovery it would stay there forever, since we wouldn't push the new AC with a fix.
+ """ + mongodb_multi.assert_reaches_phase(Phase.Pending, timeout=100) + + +def test_turn_tls_on_CLOUDP_229222(mongodb_multi: MongoDBMulti | MongoDB): + """ + This function tests CLOUDP-229222. The user attempts to fix the AutomationConfig. + Before updating the AutomationConfig, we need to ensure the operator pushed the wrong one to Ops Manager. + """ + + def wait_for_ac_exists() -> bool: + ac = mongodb_multi.get_automation_config_tester().automation_config + try: + _ = ac["ldap"]["transportSecurity"] + _ = ac["version"] + return True + except KeyError: + return False + + wait_until(wait_for_ac_exists, timeout=200) + current_version = mongodb_multi.get_automation_config_tester().automation_config["version"] + + def wait_for_ac_pushed() -> bool: + ac = mongodb_multi.get_automation_config_tester().automation_config + try: + transport_security = ac["ldap"]["transportSecurity"] + new_version = ac["version"] + if transport_security != "none": + return False + if new_version <= current_version: + return False + return True + except KeyError: + return False + + wait_until(wait_for_ac_pushed, timeout=500) + + resource = mongodb_multi.load() + + resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" + resource.update() + + +def test_multi_replicaset_CLOUDP_229222(mongodb_multi: MongoDBMulti | MongoDB): + """ + This function tests CLOUDP-229222. The recovery mechanism kicks in and pushes Automation Config. The ReplicaSet + goes into running state. + """ + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1900) + + +def test_restore_mongodb_multi_ldap_configuration(mongodb_multi: MongoDBMulti | MongoDB): + """ + This function restores the initial desired security configuration to carry on with the next tests normally. + """ + resource = mongodb_multi.load() + + resource["spec"]["security"]["authentication"]["modes"] = ["LDAP"] + resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" + resource["spec"]["security"]["authentication"]["agents"]["mode"] = "LDAP" + + resource.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_create_ldap_user(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser): + user_ldap.assert_reaches_phase(Phase.Updated) + ac = AutomationConfigTester(KubernetesTester.get_automation_config()) + ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=True) + ac.assert_expected_users(1) + + +def test_ldap_user_created_and_can_authenticate( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + attempts=10, + ) + + +def test_ops_manager_state_correctly_updated(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser): + expected_roles = { + ("admin", "clusterAdmin"), + ("admin", "readWriteAnyDatabase"), + ("admin", "dbAdminAnyDatabase"), + } + ac = AutomationConfigTester(KubernetesTester.get_automation_config()) + ac.assert_expected_users(1) + ac.assert_has_user(user_ldap["spec"]["username"]) + ac.assert_user_has_roles(user_ldap["spec"]["username"], expected_roles) + ac.assert_authentication_mechanism_enabled("PLAIN", active_auth_mechanism=True) + ac.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=1) + + assert "userCacheInvalidationInterval" in ac.automation_config["ldap"] + assert "timeoutMS" in 
ac.automation_config["ldap"] + assert ac.automation_config["ldap"]["userCacheInvalidationInterval"] == 60 + assert ac.automation_config["ldap"]["timeoutMS"] == 12345 + + +def test_deployment_is_reachable_with_ldap_agent(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_deployment_reachable() + + +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names): + mongodb_multi.reload() + mongodb_multi["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_new_ldap_user_can_authenticate_after_scaling( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + attempts=10, + ) + + +def test_disable_agent_auth(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.reload() + mongodb_multi["spec"]["security"]["authentication"]["enabled"] = False + mongodb_multi["spec"]["security"]["authentication"]["agents"]["enabled"] = False + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_mongodb_multi_connectivity_with_no_auth(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_connectivity() + + +def test_deployment_is_reachable_with_no_auth(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_deployment_reachable() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap_custom_roles.py new file mode 100644 index 000000000..db3b9582c --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap_custom_roles.py @@ -0,0 +1,80 @@ +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.kubetester import KubernetesTester +from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi_with_ldap(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_create_ldap_user(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser): + user_ldap.assert_reaches_phase(Phase.Updated) + ac = AutomationConfigTester(KubernetesTester.get_automation_config()) + ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=False) + ac.assert_expected_users(1) + + +def test_ldap_user_can_write_to_database(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + db="foo", + collection="foo", + attempts=10, + ) + + +def test_ldap_user_can_write_to_other_collection( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + 
tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + db="foo", + collection="foo2", + attempts=10, + ) + + +def test_ldap_user_can_write_to_other_database( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + db="foo2", + collection="foo", + attempts=10, + ) + + +def test_automation_config_has_roles(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.get_automation_config_tester() + role = { + "role": "cn=users,ou=groups,dc=example,dc=org", + "db": "admin", + "privileges": [ + {"actions": ["insert"], "resource": {"collection": "foo", "db": "foo"}}, + { + "actions": ["insert", "find"], + "resource": {"collection": "", "db": "admin"}, + }, + ], + "authenticationRestrictions": [], + } + tester.assert_expected_role(role_index=0, expected_value=role) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py new file mode 100644 index 000000000..72bd99b99 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py @@ -0,0 +1,24 @@ +from kubetester.mongodb import MongoDB, Phase +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_oidc_replica_set(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_assert_connectivity(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_oidc_authentication() + + +def test_ops_manager_state_updated_correctly(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.get_automation_config_tester() + tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) + tester.assert_authentication_enabled(2) + tester.assert_expected_users(0) + tester.assert_authoritative_set(True) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py new file mode 100644 index 000000000..c58b770d3 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py @@ -0,0 +1,29 @@ +from kubetester.mongodb import MongoDB, Phase +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_oidc_replica_set(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_create_user(oidc_user: MongoDBUser): + oidc_user.assert_reaches_phase(Phase.Updated, timeout=800) + + +def test_assert_connectivity(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_oidc_authentication() + + +def test_ops_manager_state_updated_correctly(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.get_automation_config_tester() + 
tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) + tester.assert_authentication_enabled(2) + tester.assert_expected_users(1) + tester.assert_authoritative_set(True) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_pvc_resize.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_pvc_resize.py similarity index 57% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_pvc_resize.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_pvc_resize.py index adc9cc668..2fdcb2b54 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_pvc_resize.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_pvc_resize.py @@ -1,50 +1,26 @@ from typing import List -import kubernetes -import pytest from kubernetes import client -from kubetester import get_statefulset, try_load -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.mongodb_multi import MongoDBMulti +from kubetester import get_statefulset +from kubetester.mongodb_multi import MongoDB, MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list RESOURCE_NAME = "multi-replica-set-pvc-resize" RESIZED_STORAGE_SIZE = "2Gi" -@pytest.fixture(scope="module") -def mongodb_multi( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-pvc-resize.yaml"), RESOURCE_NAME, namespace) - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - try_load(resource) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - return resource - - -@pytest.mark.e2e_multi_cluster_pvc_resize def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_pvc_resize -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2000) -@pytest.mark.e2e_multi_cluster_pvc_resize -def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti | MongoDB): # Update the resource mongodb_multi.load() mongodb_multi["spec"]["statefulSet"]["spec"]["volumeClaimTemplates"][0]["spec"]["resources"]["requests"][ @@ -55,9 +31,8 @@ def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_cluster_pvc_resize def test_mongodb_multi_resize_finished( - mongodb_multi: MongoDBMulti, namespace: str, member_cluster_clients: List[MultiClusterClient] + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, member_cluster_clients: List[MultiClusterClient] ): statefulsets = [] for i, c in enumerate(member_cluster_clients): diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_reconcile_races.py similarity index 75% rename from 
docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_reconcile_races.py index 10d0064f6..476288758 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_reconcile_races.py @@ -1,11 +1,9 @@ # It's intended to check for reconcile data races. import json import time -from typing import Optional -import kubernetes.client import pytest -from kubetester import create_or_update_secret, find_fixture, try_load +from kubetester import create_or_update_secret, try_load from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb import MongoDB @@ -23,40 +21,6 @@ from tests.multicluster.conftest import cluster_spec_list -@pytest.fixture(scope="module") -def ops_manager( - namespace: str, - custom_version: Optional[str], - custom_appdb_version: str, - central_cluster_client: kubernetes.client.ApiClient, -) -> MongoDBOpsManager: - resource = MongoDBOpsManager.from_yaml(find_fixture("om_validation.yaml"), namespace=namespace, name="om") - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.set_version(custom_version) - resource.set_appdb_version(custom_appdb_version) - - try_load(resource) - return resource - - -@pytest.fixture(scope="module") -def ops_manager2( - namespace: str, - custom_version: Optional[str], - custom_appdb_version: str, - central_cluster_client: kubernetes.client.ApiClient, -) -> MongoDBOpsManager: - resource = MongoDBOpsManager.from_yaml(find_fixture("om_validation.yaml"), namespace=namespace, name="om2") - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.set_version(custom_version) - resource.set_appdb_version(custom_appdb_version) - - try_load(resource) - return resource - - def get_replica_set(ops_manager, namespace: str, idx: int) -> MongoDB: name = f"mdb-{idx}-rs" resource = MongoDB.from_yaml( @@ -70,14 +34,21 @@ def get_replica_set(ops_manager, namespace: str, idx: int) -> MongoDB: return resource -def get_mdbmc(ops_manager, namespace: str, idx: int) -> MongoDBMulti: +def get_mdbmc(ops_manager, resource_type: str, namespace: str, idx: int) -> MongoDBMulti | MongoDB: name = f"mdb-{idx}-mc" - resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-cluster.yaml"), - namespace=namespace, - name=name, - ).configure(ops_manager, name, api_client=get_central_cluster_client()) - + resource_name = f"{resource_type}-multi-cluster.yaml" + if resource_type == "mongodb": + resource = MongoDB.from_yaml( + yaml_fixture(resource_name), + namespace=namespace, + name=name, + ).configure(ops_manager, name, api_client=get_central_cluster_client()) + else: + resource = MongoDBMulti.from_yaml( + yaml_fixture(resource_name), + namespace=namespace, + name=name, + ).configure(ops_manager, name, api_client=get_central_cluster_client()) try_load(resource) return resource @@ -123,8 +94,8 @@ def get_all_rs(ops_manager, namespace) -> list[MongoDB]: return [get_replica_set(ops_manager, namespace, idx) for idx in range(0, 5)] -def get_all_mdbmc(ops_manager, namespace) -> list[MongoDB]: - return [get_mdbmc(ops_manager, namespace, idx) for idx in range(0, 4)] +def get_all_mdbmc(ops_manager, resource_type, namespace) -> list[MongoDBMulti | MongoDB]: + return [get_mdbmc(ops_manager, resource_type, namespace, idx) for idx in range(0, 4)] def get_all_standalone(ops_manager, namespace) ->
list[MongoDB]: @@ -135,30 +106,25 @@ def get_all_users(ops_manager, namespace, mdb: MongoDB) -> list[MongoDBUser]: return [get_user(ops_manager, namespace, idx, mdb) for idx in range(0, 2)] -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_create_om(ops_manager: MongoDBOpsManager, ops_manager2: MongoDBOpsManager): ops_manager.update() ops_manager2.update() -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_om_ready(ops_manager: MongoDBOpsManager): ops_manager.appdb_status().assert_reaches_phase(Phase.Running, timeout=1800) ops_manager.om_status().assert_reaches_phase(Phase.Running, timeout=1800) -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_om2_ready(ops_manager2: MongoDBOpsManager): ops_manager2.appdb_status().assert_reaches_phase(Phase.Running, timeout=1800) ops_manager2.om_status().assert_reaches_phase(Phase.Running, timeout=1800) -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_create_mdb(ops_manager: MongoDBOpsManager, namespace: str): for resource in get_all_rs(ops_manager, namespace): resource["spec"]["security"] = { @@ -171,9 +137,8 @@ def test_create_mdb(ops_manager: MongoDBOpsManager, namespace: str): r.assert_reaches_phase(Phase.Running) -@pytest.mark.e2e_om_reconcile_race_with_telemetry -def test_create_mdbmc(ops_manager: MongoDBOpsManager, namespace: str): - for resource in get_all_mdbmc(ops_manager, namespace): +def test_create_mdbmc(ops_manager: MongoDBOpsManager, type: str, namespace: str): + for resource in get_all_mdbmc(ops_manager, type, namespace): resource.set_version(get_custom_mdb_version()) resource["spec"]["clusterSpecList"] = cluster_spec_list(get_member_cluster_names(), [1, 1, 1]) resource.update() @@ -182,7 +147,6 @@ def test_create_mdbmc(ops_manager: MongoDBOpsManager, namespace: str): r.assert_reaches_phase(Phase.Running) -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_create_sharded(ops_manager: MongoDBOpsManager, namespace: str): for resource in get_all_sharded(ops_manager, namespace): resource.set_version(get_custom_mdb_version()) @@ -192,7 +156,6 @@ def test_create_sharded(ops_manager: MongoDBOpsManager, namespace: str): r.assert_reaches_phase(Phase.Running) -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_create_standalone(ops_manager: MongoDBOpsManager, namespace: str): for resource in get_all_standalone(ops_manager, namespace): resource.set_version(get_custom_mdb_version()) @@ -202,7 +165,6 @@ def test_create_standalone(ops_manager: MongoDBOpsManager, namespace: str): r.assert_reaches_phase(Phase.Running) -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_create_users(ops_manager: MongoDBOpsManager, namespace: str): create_or_update_secret( namespace, @@ -219,7 +181,6 @@ def test_create_users(ops_manager: MongoDBOpsManager, namespace: str): r.assert_reaches_phase(Phase.Running) -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_pod_logs_race(multi_cluster_operator: Operator): pods = multi_cluster_operator.list_operator_pods() pod_name = pods[0].metadata.name @@ -231,7 +192,6 @@ def test_pod_logs_race(multi_cluster_operator: Operator): assert not contains_race -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_restart_operator_pod(ops_manager: MongoDBOpsManager, namespace: str, multi_cluster_operator: Operator): # this enforces a requeue of all existing resources, increasing the chances of races to happen 
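# Aside (illustrative, not part of this patch): the log checks in this module presumably
# scan the operator pod logs for the Go race detector banner; `contains_race` can be
# modeled as a plain substring match:
def log_contains_race(pod_logs: str) -> bool:
    # Operators built with `go build -race` print this marker when a data race fires.
    return "WARNING: DATA RACE" in pod_logs


assert log_contains_race("...\nWARNING: DATA RACE\n...") is True
assert log_contains_race("reconcile finished cleanly") is False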
multi_cluster_operator.restart_operator_deployment() @@ -241,7 +201,6 @@ def test_restart_operator_pod(ops_manager: MongoDBOpsManager, namespace: str, mu r.assert_reaches_phase(Phase.Running) -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_pod_logs_race_after_restart(multi_cluster_operator: Operator): pods = multi_cluster_operator.list_operator_pods() pod_name = pods[0].metadata.name @@ -253,7 +212,6 @@ def test_pod_logs_race_after_restart(multi_cluster_operator: Operator): assert not contains_race -@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_telemetry_configmap(namespace: str): config = KubernetesTester.read_configmap(namespace, TELEMETRY_CONFIGMAP_NAME) for ts_key in ["lastSendTimestampClusters", "lastSendTimestampDeployments", "lastSendTimestampOperators"]: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_clusterwide.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_clusterwide.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_clusterwide.py index 31d4fb5a3..87bc17ae6 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_clusterwide.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_clusterwide.py @@ -1,4 +1,3 @@ -import os from typing import Dict, List import kubernetes @@ -13,112 +12,26 @@ read_secret, statefulset_is_deleted, ) -from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import run_periodically +from kubetester.kubetester import KubernetesTester, run_periodically +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.conftest import ( - _install_multi_cluster_operator, - run_kube_config_creation_tool, run_multi_cluster_recovery_tool, ) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME +from tests.multicluster import prepare_multi_cluster_namespaces +from tests.multicluster.conftest import ( + create_service_entries_objects, +) -from ..constants import MULTI_CLUSTER_OPERATOR_NAME, OPERATOR_NAME -from . 
import prepare_multi_cluster_namespaces -from .conftest import cluster_spec_list, create_service_entries_objects from .multi_cluster_clusterwide import create_namespace FAILED_MEMBER_CLUSTER_NAME = "kind-e2e-cluster-3" -@fixture(scope="module") -def mdba_ns(namespace: str): - return "{}-mdb-ns-a".format(namespace) - - -@fixture(scope="module") -def mdbb_ns(namespace: str): - return "{}-mdb-ns-b".format(namespace) - - -@fixture(scope="module") -def mongodb_multi_a( - central_cluster_client: kubernetes.client.ApiClient, - mdba_ns: str, - member_cluster_names: List[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdba_ns) - resource.set_version(custom_mdb_version) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.update() - return resource - - -@fixture(scope="module") -def mongodb_multi_b( - central_cluster_client: kubernetes.client.ApiClient, - mdbb_ns: str, - member_cluster_names: List[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdbb_ns) - resource.set_version(custom_mdb_version) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.update() - return resource - - -@fixture(scope="module") -def install_operator( - namespace: str, - central_cluster_name: str, - multi_cluster_operator_installation_config: Dict[str, str], - central_cluster_client: client.ApiClient, - member_cluster_clients: List[MultiClusterClient], - member_cluster_names: List[str], - mdba_ns: str, - mdbb_ns: str, -) -> Operator: - os.environ["HELM_KUBECONTEXT"] = central_cluster_name - member_cluster_namespaces = mdba_ns + "," + mdbb_ns - run_kube_config_creation_tool( - member_cluster_names, - namespace, - namespace, - member_cluster_names, - True, - service_account_name=MULTI_CLUSTER_OPERATOR_NAME, - operator_name=OPERATOR_NAME, - ) - - return _install_multi_cluster_operator( - namespace, - multi_cluster_operator_installation_config, - central_cluster_client, - member_cluster_clients, - { - "operator.deployment_name": MULTI_CLUSTER_OPERATOR_NAME, - "operator.name": MULTI_CLUSTER_OPERATOR_NAME, - "operator.createOperatorServiceAccount": "false", - "operator.watchNamespace": member_cluster_namespaces, - "multiCluster.performFailOver": "false", - }, - central_cluster_name, - operator_name=MULTI_CLUSTER_OPERATOR_NAME, - ) - - -@mark.e2e_multi_cluster_recover_clusterwide def test_label_operator_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): api = client.CoreV1Api(api_client=central_cluster_client) @@ -129,7 +42,6 @@ def test_label_operator_namespace(namespace: str, central_cluster_client: kubern api.replace_namespace(name=namespace, body=ns) -@mark.e2e_multi_cluster_recover_clusterwide def test_create_namespaces( namespace: str, mdba_ns: str, @@ -161,13 +73,11 @@ def test_create_namespaces( ) -@mark.e2e_multi_cluster_recover_clusterwide def test_create_service_entry(service_entries: List[CustomObject]): for service_entry in service_entries: service_entry.update() -@mark.e2e_multi_cluster_recover_clusterwide def test_delete_cluster_role_and_binding( central_cluster_client: kubernetes.client.ApiClient, member_cluster_clients: 
List[MultiClusterClient], @@ -188,12 +98,10 @@ delete_cluster_role_binding(name, client.api_client) -@mark.e2e_multi_cluster_recover_clusterwide def test_deploy_operator(install_operator: Operator): install_operator.assert_is_running() -@mark.e2e_multi_cluster_recover_clusterwide def test_prepare_namespace( multi_cluster_operator_installation_config: Dict[str, str], member_cluster_clients: List[MultiClusterClient], @@ -216,7 +124,6 @@ def test_prepare_namespace( ) -@mark.e2e_multi_cluster_recover_clusterwide def test_copy_configmap_and_secret_across_ns( namespace: str, central_cluster_client: client.ApiClient, @@ -235,13 +142,11 @@ def test_copy_configmap_and_secret_across_ns( create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client) -@mark.e2e_multi_cluster_recover_clusterwide -def test_create_mongodb_multi_nsa_nsb(mongodb_multi_a: MongoDBMulti, mongodb_multi_b: MongoDBMulti): +def test_create_mongodb_multi_nsa_nsb(mongodb_multi_a: MongoDBMulti | MongoDB, mongodb_multi_b: MongoDBMulti | MongoDB): mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=1500) mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=1500) -@mark.e2e_multi_cluster_recover_clusterwide def test_update_service_entry_block_failed_cluster_traffic( namespace: str, central_cluster_client: kubernetes.client.ApiClient, @@ -258,10 +163,9 @@ def test_update_service_entry_block_failed_cluster_traffic( service_entry.update() -@mark.e2e_multi_cluster_recover_clusterwide def test_delete_database_statefulsets_in_failed_cluster( - mongodb_multi_a: MongoDBMulti, - mongodb_multi_b: MongoDBMulti, + mongodb_multi_a: MongoDBMulti | MongoDB, + mongodb_multi_b: MongoDBMulti | MongoDB, mdba_ns: str, mdbb_ns: str, member_cluster_names: list[str], @@ -307,19 +211,16 @@ def test_delete_database_statefulsets_in_failed_cluster( ) -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a: MongoDBMulti): +def test_mongodb_multi_nsa_enters_failed_state(mongodb_multi_a: MongoDBMulti | MongoDB): mongodb_multi_a.load() mongodb_multi_a.assert_reaches_phase(Phase.Failed, timeout=100) -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b: MongoDBMulti): +def test_mongodb_multi_nsb_enters_failed_state(mongodb_multi_b: MongoDBMulti | MongoDB): mongodb_multi_b.load() mongodb_multi_b.assert_reaches_phase(Phase.Failed, timeout=100) -@mark.e2e_multi_cluster_recover_clusterwide def test_recover_operator_remove_cluster( member_cluster_names: List[str], namespace: str, @@ -338,8 +239,7 @@ def test_recover_operator_remove_cluster( operator.assert_is_running() -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMulti): +def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMulti | MongoDB): mongodb_multi_a.load() mongodb_multi_a["metadata"]["annotations"]["failedClusters"] = None @@ -349,8 +249,7 @@ def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMul mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=1500) -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b: MongoDBMulti): +def test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b: MongoDBMulti | MongoDB): mongodb_multi_b.load() mongodb_multi_b["metadata"]["annotations"]["failedClusters"] = None diff --git
a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_network_partition.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_network_partition.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py index 28b910efb..0b6c82fc4 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_network_partition.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py @@ -4,41 +4,22 @@ from kubeobject import CustomObject from kubernetes import client from kubetester import delete_statefulset, statefulset_is_deleted -from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import run_periodically +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.conftest import ( get_member_cluster_api_client, run_multi_cluster_recovery_tool, ) - -from ..constants import MULTI_CLUSTER_OPERATOR_NAME -from .conftest import cluster_spec_list, create_service_entries_objects - +from tests.multicluster.conftest import ( + create_service_entries_objects, +) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME FAILED_MEMBER_CLUSTER_NAME = "kind-e2e-cluster-3" -RESOURCE_NAME = "multi-replica-set" - - -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - resource.api = client.CustomObjectsApi(central_cluster_client) - return resource - -@mark.e2e_multi_cluster_recover_network_partition def test_label_namespace(namespace: str, central_cluster_client: client.ApiClient): api = client.CoreV1Api(api_client=central_cluster_client) @@ -50,24 +31,20 @@ def test_label_namespace(namespace: str, central_cluster_client: client.ApiClien api.replace_namespace(name=namespace, body=ns) -@mark.e2e_multi_cluster_recover_network_partition def test_create_service_entry(service_entries: List[CustomObject]): for service_entry in service_entries: service_entry.update() -@mark.e2e_multi_cluster_recover_network_partition def test_deploy_operator(multi_cluster_operator_manual_remediation: Operator): multi_cluster_operator_manual_remediation.assert_is_running() -@mark.e2e_multi_cluster_recover_network_partition -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@mark.e2e_multi_cluster_recover_network_partition def test_update_service_entry_block_failed_cluster_traffic( namespace: str, central_cluster_client: kubernetes.client.ApiClient, @@ -86,9 +63,8 @@ def test_update_service_entry_block_failed_cluster_traffic( service_entry.update() -@mark.e2e_multi_cluster_recover_network_partition def test_delete_database_statefulset_in_failed_cluster( - mongodb_multi: MongoDBMulti, + 
mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: list[str], ): failed_cluster_idx = member_cluster_names.index(FAILED_MEMBER_CLUSTER_NAME) @@ -114,9 +90,8 @@ def test_delete_database_statefulset_in_failed_cluster( ) -@mark.e2e_multi_cluster_recover_network_partition def test_mongodb_multi_enters_failed_state( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, central_cluster_client: client.ApiClient, ): @@ -124,7 +99,6 @@ def test_mongodb_multi_enters_failed_state( mongodb_multi.assert_reaches_phase(Phase.Failed, timeout=100) -@mark.e2e_multi_cluster_recover_network_partition def test_recover_operator_remove_cluster( member_cluster_names: List[str], namespace: str, @@ -141,8 +115,9 @@ def test_recover_operator_remove_cluster( operator.assert_is_running() -@mark.e2e_multi_cluster_recover_network_partition -def test_mongodb_multi_recovers_removing_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): +def test_mongodb_multi_recovers_removing_cluster( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: List[str] +): mongodb_multi.load() last_transition_time = mongodb_multi.get_status_last_transition_time() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set.py similarity index 73% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set.py index 599015d46..175eaf857 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set.py @@ -6,57 +6,18 @@ from kubernetes.client.rest import ApiException from kubetester import delete_statefulset, get_statefulset, wait_until from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase from tests.conftest import ( assert_log_rotation_process, - member_cluster_clients, - setup_log_rotate_for_agents, ) -from tests.multicluster.conftest import cluster_spec_list MONGODB_PORT = 30000 -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names, - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-central-sts-override.yaml"), - "multi-replica-set", - namespace, - ) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - additional_mongod_config = { - "systemLog": {"logAppend": True, "verbosity": 4}, - "operationProfiling": {"mode": "slowOp"}, - "net": {"port": MONGODB_PORT}, - } - - resource["spec"]["additionalMongodConfig"] = additional_mongod_config - setup_log_rotate_for_agents(resource) - - # TODO: incorporate this into the base class. 
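# Aside (hedged sketch, not part of this patch): the update() calls in this file rely on
# JSON merge patch semantics (as the inline comment below notes, deleting a key is done by
# setting it to None). RFC 7386 in miniature:
def json_merge_patch(target: dict, patch: dict) -> dict:
    result = dict(target)
    for key, value in patch.items():
        if value is None:
            result.pop(key, None)  # null deletes the key
        elif isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = json_merge_patch(result[key], value)  # objects merge recursively
        else:
            result[key] = value  # scalars and arrays replace wholesale
    return result


# e.g. clearing operationProfiling while lowering verbosity:
assert json_merge_patch(
    {"systemLog": {"verbosity": 4}, "operationProfiling": {"mode": "slowOp"}},
    {"systemLog": {"verbosity": 2}, "operationProfiling": None},
) == {"systemLog": {"verbosity": 2}}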
- resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - resource.set_architecture_annotation() - - resource.update() - return resource - - -@pytest.mark.e2e_multi_cluster_replica_set def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): clients = cluster_clients @@ -66,19 +27,16 @@ def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: st assert central_cluster_name in clients -@pytest.mark.e2e_multi_cluster_replica_set def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2000) -@pytest.mark.e2e_multi_cluster_replica_set def test_statefulset_is_created_across_multiple_clusters( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): def statefulsets_are_ready(): @@ -105,9 +63,8 @@ def statefulsets_are_ready(): wait_until(statefulsets_are_ready, timeout=600) -@pytest.mark.e2e_multi_cluster_replica_set def test_pvc_not_created( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): @@ -118,15 +75,12 @@ def test_pvc_not_created( assert e.value.reason == "Not Found" -@skip_if_local -@pytest.mark.e2e_multi_cluster_replica_set -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester(port=MONGODB_PORT) tester.assert_connectivity() -@pytest.mark.e2e_multi_cluster_replica_set -def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): +def test_statefulset_overrides(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) # assert sts.podspec override in cluster1 cluster_one_client = member_cluster_clients[0] @@ -134,9 +88,8 @@ def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clien assert_container_in_sts("sidecar1", cluster_one_sts) -@pytest.mark.e2e_multi_cluster_replica_set def test_headless_service_creation( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, member_cluster_clients: List[MultiClusterClient], ): @@ -157,8 +110,7 @@ def test_headless_service_creation( assert len(ep_two.subsets[0].addresses) == mongodb_multi.get_item_spec(cluster_two_client.cluster_name)["members"] -@pytest.mark.e2e_multi_cluster_replica_set -def test_mongodb_options(mongodb_multi: MongoDBMulti): +def test_mongodb_options(mongodb_multi: MongoDBMulti | MongoDB): automation_config_tester = mongodb_multi.get_automation_config_tester() for process in automation_config_tester.get_replica_set_processes(mongodb_multi.name): assert process["args2_6"]["systemLog"]["verbosity"] == 4 @@ -168,8 +120,9 @@ def test_mongodb_options(mongodb_multi: MongoDBMulti): assert_log_rotation_process(process) -@pytest.mark.e2e_multi_cluster_replica_set -def test_update_additional_options(mongodb_multi: MongoDBMulti, central_cluster_client: kubernetes.client.ApiClient): +def test_update_additional_options( + mongodb_multi: MongoDBMulti | MongoDB, central_cluster_client: 
kubernetes.client.ApiClient +): mongodb_multi["spec"]["additionalMongodConfig"]["systemLog"]["verbosity"] = 2 mongodb_multi["spec"]["additionalMongodConfig"]["net"]["maxIncomingConnections"] = 100 # update uses json merge+patch which means that deleting keys is done by setting them to None @@ -180,8 +133,7 @@ def test_update_additional_options(mongodb_multi: MongoDBMulti, central_cluster_ mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set -def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti): +def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti | MongoDB): automation_config_tester = mongodb_multi.get_automation_config_tester() for process in automation_config_tester.get_replica_set_processes(mongodb_multi.name): assert process["args2_6"]["systemLog"]["verbosity"] == 2 @@ -192,10 +144,9 @@ def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti): assert "mode" not in process["args2_6"]["operationProfiling"] -@pytest.mark.e2e_multi_cluster_replica_set def test_delete_member_cluster_sts( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): sts_name = "{}-0".format(mongodb_multi.name) @@ -223,8 +174,9 @@ def check_if_sts_was_recreated() -> bool: mongodb_multi.assert_reaches_phase(Phase.Running, timeout=400) -@pytest.mark.e2e_multi_cluster_replica_set -def test_cleanup_on_mdbm_delete(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): +def test_cleanup_on_mdbm_delete( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] +): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) cluster_one_client = member_cluster_clients[0] cluster_one_sts = statefulsets[cluster_one_client.cluster_name] diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_deletion.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_deletion.py similarity index 69% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_deletion.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_deletion.py index fd387cdda..55b769a83 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_deletion.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_deletion.py @@ -1,57 +1,34 @@ from typing import List import kubernetes -import pytest -from kubetester import try_load, wait_until +from kubetester import wait_until from kubetester.automation_config_tester import AutomationConfigTester from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase from tests import test_logger -from tests.multicluster.conftest import cluster_spec_list logger = test_logger.get_test_logger(__name__) -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", namespace) - - if 
try_load(resource): - return resource - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - return resource.update() - - -@pytest.mark.e2e_multi_cluster_replica_set_deletion def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set_deletion -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set_deletion -def test_automation_config_has_been_updated(mongodb_multi: MongoDBMulti): +def test_automation_config_has_been_updated(mongodb_multi: MongoDBMulti | MongoDB): tester = AutomationConfigTester(KubernetesTester.get_automation_config()) processes = tester.get_replica_set_processes(mongodb_multi.name) assert len(processes) == 5 -@pytest.mark.e2e_multi_cluster_replica_set_deletion -def test_delete_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_delete_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.delete() def wait_for_deleted() -> bool: @@ -68,7 +45,6 @@ def wait_for_deleted() -> bool: wait_until(wait_for_deleted, timeout=60) -@pytest.mark.e2e_multi_cluster_replica_set_deletion def test_deployment_has_been_removed_from_automation_config(): def wait_until_automation_config_is_clean() -> bool: tester = AutomationConfigTester(KubernetesTester.get_automation_config()) @@ -82,9 +58,8 @@ def wait_until_automation_config_is_clean() -> bool: wait_until(wait_until_automation_config_is_clean, timeout=60) -@pytest.mark.e2e_multi_cluster_replica_set_deletion def test_kubernetes_resources_have_been_cleaned_up( - mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] ): def wait_until_secrets_are_removed() -> bool: try: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_ignore_unknown_users.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_ignore_unknown_users.py new file mode 100644 index 000000000..a9e3c17ff --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_ignore_unknown_users.py @@ -0,0 +1,27 @@ +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_replica_set(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_authoritative_set_false(mongodb_multi: MongoDBMulti | MongoDB): + tester = AutomationConfigTester(KubernetesTester.get_automation_config()) + tester.assert_authoritative_set(False) + + +def test_set_ignore_unknown_users_false(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.load() + mongodb_multi["spec"]["security"]["authentication"]["ignoreUnknownUsers"] = False + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_authoritative_set_true(mongodb_multi: MongoDBMulti | MongoDB): + tester = AutomationConfigTester(KubernetesTester.get_automation_config()) + tester.assert_authoritative_set(True) diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_member_options.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_member_options.py similarity index 63% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_member_options.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_member_options.py index 7990943e8..5ca347cbb 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_member_options.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_member_options.py @@ -1,83 +1,11 @@ from typing import Dict -import kubernetes -import pytest -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - - -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names, - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), - "multi-replica-set", - namespace, - ) - resource.set_version(custom_mdb_version) - member_options = [ - [ - { - "votes": 1, - "priority": "0.3", - "tags": { - "cluster": "cluster-1", - "region": "weur", - }, - }, - { - "votes": 1, - "priority": "0.7", - "tags": { - "cluster": "cluster-1", - "region": "eeur", - }, - }, - ], - [ - { - "votes": 1, - "priority": "0.2", - "tags": { - "cluster": "cluster-2", - "region": "apac", - }, - }, - ], - [ - { - "votes": 1, - "priority": "1.3", - "tags": { - "cluster": "cluster-3", - "region": "nwus", - }, - }, - { - "votes": 1, - "priority": "2.7", - "tags": { - "cluster": "cluster-3", - "region": "seus", - }, - }, - ], - ] - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2], member_options) - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - resource.update() - return resource - - -@pytest.mark.e2e_multi_cluster_replica_set_member_options + + def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): clients = cluster_clients @@ -87,18 +15,15 @@ def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: st assert central_cluster_name in clients -@pytest.mark.e2e_multi_cluster_replica_set_member_options def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() config = mongodb_multi.get_automation_config_tester().automation_config rs = config["replicaSets"] @@ -129,8 +54,7 @@ def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti): assert member5["tags"] == {"cluster": "cluster-3", "region": "seus"} -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def 
test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][0]["memberConfig"][0] = { @@ -158,8 +82,7 @@ def test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti): } -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][1]["memberConfig"][0]["votes"] = 0 @@ -175,8 +98,7 @@ def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti): assert updated_member["priority"] == 0.0 -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][1]["memberConfig"][0]["votes"] = 0 @@ -188,8 +110,7 @@ def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMult ) -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() # A member with priority 0.0 could still be a voting member. It cannot become primary and cannot trigger elections. # https://www.mongodb.com/docs/v5.0/core/replica-set-priority-0-member/#priority-0-replica-set-members @@ -199,8 +120,7 @@ def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMu mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][2]["memberConfig"][1]["votes"] = 3 diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_migration.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_migration.py new file mode 100644 index 000000000..50d4c8efb --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_migration.py @@ -0,0 +1,52 @@ +from typing import List + +from kubetester.kubetester import ( + assert_statefulset_architecture, + get_default_architecture, +) +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import MongoDBBackgroundTester +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + + +def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): + mdb_health_checker.start() + + +def test_migrate_architecture(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): + """ + 
If the E2E is running with default architecture as non-static, + then the test will migrate to static and vice versa. + """ + original_default_architecture = get_default_architecture() + target_architecture = "non-static" if original_default_architecture == "static" else "static" + + mongodb_multi.trigger_architecture_migration() + + mongodb_multi.load() + assert mongodb_multi["metadata"]["annotations"]["mongodb.com/v1.architecture"] == target_architecture + + mongodb_multi.assert_abandons_phase(Phase.Running, timeout=1800) + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) + + statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) + for statefulset in statefulsets.values(): + assert_statefulset_architecture(statefulset, target_architecture) + + +def test_mdb_healthy_throughout_architecture_change( + mdb_health_checker: MongoDBBackgroundTester, +): + mdb_health_checker.assert_healthiness() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_down.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_down.py similarity index 50% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_down.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_down.py index 7ba868397..2c6500e67 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_down.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_down.py @@ -1,84 +1,24 @@ from typing import List -import kubernetes -import pytest -from kubetester import try_load from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - # start at one member in each cluster - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - 
mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: - if try_load(mongodb_multi_unmarshalled): - return mongodb_multi_unmarshalled - - return mongodb_multi_unmarshalled.update() -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set_scale_down -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) @@ -95,14 +35,12 @@ def test_statefulsets_have_been_created_correctly( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(5) -@pytest.mark.e2e_multi_cluster_replica_set_scale_down -def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 1 # Testing scaling down to zero is required to test fix for https://jira.mongodb.org/browse/CLOUDP-324655 @@ -113,9 +51,8 @@ def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_statefulsets_have_been_scaled_down_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) @@ -132,14 +69,11 @@ def test_statefulsets_have_been_scaled_down_correctly( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(3) -@skip_if_local -@pytest.mark.e2e_multi_cluster_replica_set_scale_down -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_up.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_up.py similarity index 63% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_up.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_up.py index 7640c2c4a..4b18b23b0 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_up.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_up.py @@ -1,84 +1,25 @@ from typing import List -import kubernetes import kubetester -import pytest from 
kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: List[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: - # we have created certs for all 5 members, but want to start at only 3. 
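# Aside (illustrative, not part of this patch): the assert_processes_size checks in these
# scaling tests compare against the sum of members across the clusterSpecList, e.g.:
def total_members(cluster_spec_list: list[dict]) -> int:
    return sum(item["members"] for item in cluster_spec_list)


assert total_members([{"members": 1}, {"members": 1}, {"members": 1}]) == 3  # before scale-up
assert total_members([{"members": 2}, {"members": 1}, {"members": 2}]) == 5  # after scale-up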
- mongodb_multi_unmarshalled["spec"]["clusterSpecList"][0]["members"] = 1 - mongodb_multi_unmarshalled["spec"]["clusterSpecList"][1]["members"] = 1 - mongodb_multi_unmarshalled["spec"]["clusterSpecList"][2]["members"] = 1 - return mongodb_multi_unmarshalled.create() -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set_scale_up -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): # Even though we already verified, in previous test, that the MongoDBMultiCluster resource's phase is running (that would mean all STSs are ready); @@ -107,14 +48,12 @@ def fn(): kubetester.wait_until(fn, timeout=60, message="Verifying sts has correct number of replicas in cluster three") -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(3) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up -def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 2 mongodb_multi["spec"]["clusterSpecList"][1]["members"] = 1 @@ -124,9 +63,8 @@ def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_statefulsets_have_been_scaled_up_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): # Even though we already verified, in previous test, that the MongoDBMultiCluster resource's phase is running (that would mean all STSs are ready); @@ -161,14 +99,11 @@ def fn(): ) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(5) -@skip_if_local -@pytest.mark.e2e_multi_cluster_replica_set_scale_up -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_test_mtls.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_test_mtls.py similarity index 84% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_test_mtls.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_test_mtls.py index 5c720264c..03a7fbbf5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_test_mtls.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_test_mtls.py @@ -1,46 +1,23 @@ from typing import List import kubernetes -import pytest from kubetester import wait_until from 
kubetester.kubetester import KubernetesTester, create_testing_namespace -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", namespace) - resource.set_version(custom_mdb_version) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - # TODO: incorporate this into the base class. - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.update() - return resource - - -@pytest.mark.e2e_multi_cluster_mtls_test def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_mtls_test -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) -@pytest.mark.e2e_multi_cluster_mtls_test def test_create_mongo_pod_in_separate_namespace( member_cluster_clients: List[MultiClusterClient], evergreen_task_id: str, @@ -96,9 +73,8 @@ def pod_is_ready() -> bool: wait_until(pod_is_ready, timeout=60) -@pytest.mark.e2e_multi_cluster_mtls_test def test_connectivity_fails_from_second_namespace( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): @@ -126,7 +102,6 @@ def test_connectivity_fails_from_second_namespace( ], f"no expected failure messages found in result: {result}" -@pytest.mark.e2e_multi_cluster_mtls_test def test_enable_istio_injection( member_cluster_clients: List[MultiClusterClient], namespace: str, @@ -138,7 +113,6 @@ def test_enable_istio_injection( corev1.patch_namespace(f"{namespace}-mongo", ns) -@pytest.mark.e2e_multi_cluster_mtls_test def test_delete_existing_mongo_pod(member_cluster_clients: List[MultiClusterClient], namespace: str): cluster_1_client = member_cluster_clients[0] corev1 = kubernetes.client.CoreV1Api(api_client=cluster_1_client.api_client) @@ -154,7 +128,6 @@ def pod_is_deleted() -> bool: wait_until(pod_is_deleted, timeout=120) -@pytest.mark.e2e_multi_cluster_mtls_test def test_create_pod_with_istio_sidecar(member_cluster_clients: List[MultiClusterClient], namespace: str): cluster_1_client = member_cluster_clients[0] corev1 = kubernetes.client.CoreV1Api(api_client=cluster_1_client.api_client) @@ -191,9 +164,8 @@ def two_containers_are_present() -> bool: wait_until(two_containers_are_present, timeout=60) -@pytest.mark.e2e_multi_cluster_mtls_test def test_connectivity_succeeds_from_second_namespace( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_down_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_down_cluster.py similarity index 54% rename from 
docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_down_cluster.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_down_cluster.py index 38350027f..fe0b26b95 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_down_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_down_cluster.py @@ -3,76 +3,24 @@ import kubernetes import pytest from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: - return mongodb_multi_unmarshalled.create() -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scale_down_cluster -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) @@ -92,14 +40,12 @@ def test_statefulsets_have_been_created_correctly( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(5) -@pytest.mark.e2e_multi_cluster_scale_down_cluster -def 
test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() # remove first and last cluster mongodb_multi["spec"]["clusterSpecList"] = [mongodb_multi["spec"]["clusterSpecList"][1]] @@ -108,9 +54,8 @@ def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800, ignore_errors=True) -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_statefulsets_have_been_scaled_down_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets([member_cluster_clients[1]]) @@ -130,15 +75,12 @@ def test_statefulsets_have_been_scaled_down_correctly( assert e.value.reason == "Not Found" -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(1) -@skip_if_local -@pytest.mark.e2e_multi_cluster_scale_down_cluster -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): # there should only be one member in cluster 2 so there is just a single service. tester = mongodb_multi.tester(service_names=[f"{mongodb_multi.name}-1-0-svc"]) tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py new file mode 100644 index 000000000..a4d9af724 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py @@ -0,0 +1,96 @@ +from typing import List + +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + + +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti | MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + # read all statefulsets except the last one + mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients[:-1]) + + +def test_ops_manager_has_been_updated_correctly_before_scaling(): + ac = AutomationConfigTester() + ac.assert_processes_size(3) + + +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): + mongodb_multi["spec"]["clusterSpecList"].append( + {"members": 2, "clusterName": member_cluster_clients[2].cluster_name} + ) + mongodb_multi.update() + mongodb_multi.assert_abandons_phase(Phase.Running, timeout=120) + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) + + +def test_statefulsets_have_been_scaled_up_correctly( + mongodb_multi: MongoDBMulti | MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + 
mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients, timeout=60)
+
+
+def test_ops_manager_has_been_updated_correctly_after_scaling():
+    ac = AutomationConfigTester()
+    ac.assert_processes_size(5)
+
+
+def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str):
+    tester = mongodb_multi.tester()
+    tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)])
+
+
+# From here on, the tests verify that we can change the project of the MongoDBMulti (or MongoDB) resource even
+# when the replica set has non-sequential member ids.
+
+
+class TestNonSequentialMemberIdsInReplicaSet(KubernetesTester):
+
+    def test_scale_up_first_cluster(
+        mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]
+    ):
+        # Scale up the first cluster to 3 members. This leads to non-sequential member ids in the replica set:
+        # multi-replica-set-0-0 : 0
+        # multi-replica-set-0-1 : 1
+        # multi-replica-set-0-2 : 5
+        # multi-replica-set-1-0 : 2
+        # multi-replica-set-2-0 : 3
+        # multi-replica-set-2-1 : 4
+
+        mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 3
+        mongodb_multi.update()
+
+        mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients)
+        mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600)
+
+    def test_change_project(mongodb_multi: MongoDBMulti | MongoDB, new_project_configmap: str):
+        oldRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name)
+
+        mongodb_multi["spec"]["opsManager"]["configMapRef"]["name"] = new_project_configmap
+        mongodb_multi.update()
+
+        mongodb_multi.assert_abandons_phase(phase=Phase.Running, timeout=300)
+        mongodb_multi.assert_reaches_phase(phase=Phase.Running, timeout=600)
+
+        newRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name)
+
+        # Assert that the replica set member ids have not changed after changing the project.
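+        # The member lists are read from the project's automation config, so this also
+        # verifies that the non-sequential id (5) assigned to multi-replica-set-0-2 in
+        # the previous test survived the move to the new project (presumably the
+        # operator reuses the ids it has already recorded instead of renumbering members).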
+ assert oldRsMembers == newRsMembers diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster_new_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py similarity index 58% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster_new_cluster.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py index f25821c85..1edf35056 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster_new_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py @@ -1,12 +1,9 @@ from typing import Callable, List import kubernetes -import pytest from kubernetes import client from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient @@ -14,58 +11,7 @@ from kubetester.phase import Phase from tests.conftest import run_kube_config_creation_tool from tests.constants import MULTI_CLUSTER_OPERATOR_NAME -from tests.multicluster.conftest import cluster_spec_list -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - # ensure certs are created for the members during scale up - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: - mongodb_multi_unmarshalled["spec"]["clusterSpecList"].pop() - return mongodb_multi_unmarshalled.create() - - -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_deploy_operator( install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], member_cluster_names: List[str], @@ -77,14 +23,12 @@ def test_deploy_operator( operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): 
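+    # At this point the resource is expected to span only the first two member clusters
+    # (the statefulset assertions in the next test only look at clusters one and two);
+    # the third cluster is added later in test_add_new_cluster_to_mongodb_multi_resource.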
mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): clients = {c.cluster_name: c for c in member_cluster_clients} @@ -100,20 +44,17 @@ def test_statefulsets_have_been_created_correctly( assert cluster_two_sts.status.ready_replicas == 1 -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(3) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_delete_deployment(namespace: str, central_cluster_client: kubernetes.client.ApiClient): client.AppsV1Api(api_client=central_cluster_client).delete_namespaced_deployment( MULTI_CLUSTER_OPERATOR_NAME, namespace ) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_re_deploy_operator( install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], member_cluster_names: List[str], @@ -126,9 +67,8 @@ def test_re_deploy_operator( operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_add_new_cluster_to_mongodb_multi_resource( - mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] ): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"].append( @@ -138,9 +78,8 @@ def test_add_new_cluster_to_mongodb_multi_resource( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_statefulsets_have_been_created_correctly_after_cluster_addition( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): clients = {c.cluster_name: c for c in member_cluster_clients} @@ -159,14 +98,11 @@ def test_statefulsets_have_been_created_correctly_after_cluster_addition( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(5) -@skip_if_local -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scram.py similarity index 69% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scram.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scram.py index 402355fa1..b71812c41 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scram.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scram.py @@ -1,20 +1,17 @@ from typing import List import kubernetes -import pytest from kubetester import create_or_update_secret, read_secret from kubetester.automation_config_tester import AutomationConfigTester from kubetester.kubetester import KubernetesTester 
-from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser from kubetester.mongotester import with_scram from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list -MDB_RESOURCE = "multi-replica-set-scram" USER_NAME = "my-user-1" USER_RESOURCE = "multi-replica-set-scram-user" USER_DATABASE = "admin" @@ -23,52 +20,10 @@ NEW_USER_PASSWORD = "my-new-password7" -@pytest.fixture(scope="function") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names, - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) - resource.set_version(custom_mdb_version) - - resource["spec"]["security"] = { - "authentication": { - "agents": {"mode": "MONGODB-CR"}, - "enabled": True, - "modes": ["SCRAM-SHA-1", "SCRAM-SHA-256", "MONGODB-CR"], - } - } - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return resource - - -@pytest.fixture(scope="function") -def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: - resource = MongoDBUser.from_yaml(yaml_fixture("scram-sha-user.yaml"), USER_RESOURCE, namespace) - - resource["spec"]["username"] = USER_NAME - resource["spec"]["passwordSecretKeyRef"] = { - "name": PASSWORD_SECRET_NAME, - "key": "password", - } - resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return resource - - -@pytest.mark.e2e_multi_cluster_scram def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scram def test_create_mongodb_user( central_cluster_client: kubernetes.client.ApiClient, mongodb_user: MongoDBUser, @@ -85,13 +40,11 @@ def test_create_mongodb_user( mongodb_user.assert_reaches_phase(Phase.Pending, timeout=100) -@pytest.mark.e2e_multi_cluster_scram -def test_create_mongodb_multi_with_scram(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi_with_scram(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) -@pytest.mark.e2e_multi_cluster_scram def test_user_reaches_updated( central_cluster_client: kubernetes.client.ApiClient, mongodb_user: MongoDBUser, @@ -99,16 +52,14 @@ def test_user_reaches_updated( mongodb_user.assert_reaches_phase(Phase.Updated, timeout=100) -@pytest.mark.e2e_multi_cluster_scram -def test_replica_set_connectivity_using_user_password(mongodb_multi: MongoDBMulti): +def test_replica_set_connectivity_using_user_password(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity(db="admin", opts=[with_scram(USER_NAME, USER_PASSWORD)]) -@pytest.mark.e2e_multi_cluster_scram def test_change_password_and_check_connectivity( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, central_cluster_client: kubernetes.client.ApiClient, ): create_or_update_secret( @@ -125,8 +76,7 @@ def test_change_password_and_check_connectivity( ) -@pytest.mark.e2e_multi_cluster_scram -def 
test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti): +def test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_scram_sha_authentication_fails( password=USER_PASSWORD, @@ -135,10 +85,9 @@ def test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti) ) -@pytest.mark.e2e_multi_cluster_scram def test_connection_string_secret_was_created( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for client in member_cluster_clients: @@ -153,7 +102,6 @@ def test_connection_string_secret_was_created( assert "connectionString.standardSrv" in secret_data -@pytest.mark.e2e_multi_cluster_scram def test_om_configured_correctly(): expected_roles = { ("admin", "clusterAdmin"), @@ -170,16 +118,14 @@ def test_om_configured_correctly(): tester.assert_authentication_mechanism_enabled("MONGODB-CR", active_auth_mechanism=False) -@pytest.mark.e2e_multi_cluster_scram -def test_replica_set_connectivity(mongodb_multi: MongoDBMulti): +def test_replica_set_connectivity(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity(db="admin", opts=[with_scram(USER_NAME, NEW_USER_PASSWORD)]) -@pytest.mark.e2e_multi_cluster_scram def test_replica_set_connectivity_from_connection_string_standard( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): secret_data = read_secret( @@ -195,10 +141,9 @@ def test_replica_set_connectivity_from_connection_string_standard( ) -@pytest.mark.e2e_multi_cluster_scram def test_replica_set_connectivity_from_connection_string_standard_srv( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): secret_data = read_secret( diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_split_horizon.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_split_horizon.py new file mode 100644 index 000000000..7a0a70e2e --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_split_horizon.py @@ -0,0 +1,53 @@ +from typing import List + +import yaml +from kubetester.kubetester import KubernetesTester +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_deploy_mongodb_multi_with_tls( + mongodb_multi: MongoDBMulti | MongoDB, + namespace: str, +): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_create_node_ports(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): + for mcc in member_cluster_clients: + with open( + yaml_fixture(f"split-horizon-node-ports/mongodbmulticluster-split-horizon-node-port.yaml"), + "r", + ) as f: + service_body = yaml.safe_load(f.read()) + + # configure labels and selectors + service_body["metadata"]["labels"][ + "mongodbmulticluster" + ] = f"{mongodb_multi.namespace}-{mongodb_multi.name}" + 
service_body["metadata"]["labels"][ + "statefulset.kubernetes.io/pod-name" + ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" + service_body["spec"]["selector"][ + "statefulset.kubernetes.io/pod-name" + ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" + + KubernetesTester.create_service( + mongodb_multi.namespace, + body=service_body, + api_client=mcc.api_client, + ) + + +def test_tls_connectivity(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): + tester = mongodb_multi.tester() + tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_sts_override.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_sts_override.py similarity index 61% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_sts_override.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_sts_override.py index 3a762c580..e05fb74f1 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_sts_override.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_sts_override.py @@ -1,45 +1,22 @@ from typing import List -import kubernetes -import pytest from kubernetes import client -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-sts-override.yaml"), - "multi-replica-set-sts-override", - namespace, - ) - resource.set_version(custom_mdb_version) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.update() - - -@pytest.mark.e2e_multi_sts_override def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_sts_override -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_sts_override -def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): +def test_statefulset_overrides(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) # assert sts.podspec override in cluster1 @@ -54,9 +31,8 @@ def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clien assert_container_in_sts("sidecar2", cluster_two_sts) -@pytest.mark.e2e_multi_sts_override def test_access_modes_pvc( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_no_mesh.py similarity index 52% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_no_mesh.py rename to 
docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_no_mesh.py index 9932aac29..5b79a3734 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_no_mesh.py @@ -3,158 +3,15 @@ import kubernetes from kubernetes import client from kubetester import get_service -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.common.placeholders import placeholders from tests.conftest import update_coredns_hosts -from tests.multicluster.conftest import cluster_spec_list -CERT_SECRET_PREFIX = "clustercert" -MDB_RESOURCE = "multi-cluster-replica-set" -BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem" - -@fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, member_cluster_names: List[str], custom_mdb_version: str -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - # These domains map 1:1 to the CoreDNS file. Please be mindful when updating them. - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 2, 2]) - - resource["spec"]["externalAccess"] = {} - resource["spec"]["clusterSpecList"][0]["externalAccess"] = { - "externalDomain": "kind-e2e-cluster-1.interconnected", - "externalService": { - "spec": { - "type": "LoadBalancer", - "publishNotReadyAddresses": False, - "ports": [ - { - "name": "mongodb", - "port": 27017, - }, - { - "name": "backup", - "port": 27018, - }, - { - "name": "testing0", - "port": 27019, - }, - ], - } - }, - } - resource["spec"]["clusterSpecList"][1]["externalAccess"] = { - "externalDomain": "kind-e2e-cluster-2.interconnected", - "externalService": { - "spec": { - "type": "LoadBalancer", - "publishNotReadyAddresses": False, - "ports": [ - { - "name": "mongodb", - "port": 27017, - }, - { - "name": "backup", - "port": 27018, - }, - { - "name": "testing1", - "port": 27019, - }, - ], - } - }, - } - resource["spec"]["clusterSpecList"][2]["externalAccess"] = { - "externalDomain": "kind-e2e-cluster-3.interconnected", - "externalService": { - "spec": { - "type": "LoadBalancer", - "publishNotReadyAddresses": False, - "ports": [ - { - "name": "mongodb", - "port": 27017, - }, - { - "name": "backup", - "port": 27018, - }, - { - "name": "testing2", - "port": 27019, - }, - ], - } - }, - } - - return resource - - -@fixture(scope="module") -def disable_istio( - multi_cluster_operator: Operator, - namespace: str, - member_cluster_clients: List[MultiClusterClient], -): - for mcc in member_cluster_clients: - api = client.CoreV1Api(api_client=mcc.api_client) - labels = {"istio-injection": "disabled"} - ns = api.read_namespace(name=namespace) - ns.metadata.labels.update(labels) - api.replace_namespace(name=namespace, body=ns) - return None - - -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - disable_istio, - namespace: str, - mongodb_multi_unmarshalled: MongoDBMulti, - 
multi_cluster_issuer_ca_configmap: str, -) -> MongoDBMulti: - mongodb_multi_unmarshalled["spec"]["security"] = { - "certsSecretPrefix": CERT_SECRET_PREFIX, - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - mongodb_multi_unmarshalled.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return mongodb_multi_unmarshalled.update() - - -@fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@mark.e2e_multi_cluster_tls_no_mesh def test_update_coredns(cluster_clients: dict[str, kubernetes.client.ApiClient]): hosts = [ ("172.18.255.211", "test.kind-e2e-cluster-1.interconnected"), @@ -202,14 +59,12 @@ def test_update_coredns(cluster_clients: dict[str, kubernetes.client.ApiClient]) update_coredns_hosts(hosts, cluster_name, api_client=cluster_api) -@mark.e2e_multi_cluster_tls_no_mesh def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@mark.e2e_multi_cluster_tls_no_mesh def test_create_mongodb_multi( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, server_certs: str, multi_cluster_issuer_ca_configmap: str, @@ -219,10 +74,9 @@ def test_create_mongodb_multi( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2400, ignore_errors=True) -@mark.e2e_multi_cluster_tls_no_mesh def test_service_overrides( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for cluster_idx, member_cluster_client in enumerate(member_cluster_clients): @@ -250,10 +104,9 @@ def test_service_overrides( assert ports[2].port == 27019 -@mark.e2e_multi_cluster_tls_no_mesh def test_placeholders_in_external_services( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for cluster_spec_item in mongodb_multi["spec"]["clusterSpecList"]: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_scram.py similarity index 55% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_scram.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_scram.py index 978ae0832..39f1ebcc5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_scram.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_scram.py @@ -3,22 +3,15 @@ import kubernetes from kubetester import create_secret, read_secret from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester, ensure_ent_version -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser from kubetester.mongotester import 
with_scram, with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark -from tests.multicluster.conftest import cluster_spec_list -CERT_SECRET_PREFIX = "clustercert" -MDB_RESOURCE = "multi-cluster-replica-set" -BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" USER_NAME = "my-user-1" USER_RESOURCE = "multi-replica-set-scram-user" USER_DATABASE = "admin" @@ -26,80 +19,12 @@ USER_PASSWORD = "my-password" -@fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) - resource.set_version(ensure_ent_version(custom_mdb_version)) - resource["spec"]["clusterSpecList"] = cluster_spec_list( - member_cluster_names=member_cluster_names, members=[2, 1, 2] - ) - - return resource - - -@fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - server_certs: str, - mongodb_multi_unmarshalled: MongoDBMulti, - multi_cluster_issuer_ca_configmap: str, -) -> MongoDBMulti: - - resource = mongodb_multi_unmarshalled - resource["spec"]["security"] = { - "certsSecretPrefix": CERT_SECRET_PREFIX, - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.create() - - -@fixture(scope="module") -def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: - resource = MongoDBUser.from_yaml(yaml_fixture("mongodb-user.yaml"), USER_RESOURCE, namespace) - - resource["spec"]["username"] = USER_NAME - resource["spec"]["passwordSecretKeyRef"] = { - "name": PASSWORD_SECRET_NAME, - "key": "password", - } - resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE - resource["spec"]["mongodbResourceRef"]["namespace"] = namespace - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.create() - - -@mark.e2e_multi_cluster_tls_with_scram def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@mark.e2e_multi_cluster_tls_with_scram def test_deploy_mongodb_multi_with_tls( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, member_cluster_clients: List[MultiClusterClient], ): @@ -107,9 +32,8 @@ def test_deploy_mongodb_multi_with_tls( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@mark.e2e_multi_cluster_tls_with_scram def test_update_mongodb_multi_tls_with_scram( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, ): mongodb_multi.load() @@ -118,7 +42,6 @@ def test_update_mongodb_multi_tls_with_scram( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@mark.e2e_multi_cluster_tls_with_scram def test_create_mongodb_user( central_cluster_client: kubernetes.client.ApiClient, mongodb_user: MongoDBUser, @@ -134,16 +57,12 @@ def 
test_create_mongodb_user( mongodb_user.assert_reaches_phase(Phase.Updated, timeout=100) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram -def test_tls_connectivity(mongodb_multi: MongoDBMulti, ca_path: str): +def test_tls_connectivity(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram -def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity( db="admin", @@ -154,11 +73,9 @@ def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti ) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram def test_replica_set_connectivity_from_connection_string_standard( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ca_path: str, ): @@ -178,11 +95,9 @@ def test_replica_set_connectivity_from_connection_string_standard( ) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram def test_replica_set_connectivity_from_connection_string_standard_srv( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ca_path: str, ): @@ -202,9 +117,8 @@ def test_replica_set_connectivity_from_connection_string_standard_srv( ) -@mark.e2e_multi_cluster_tls_with_scram def test_mongodb_multi_tls_enable_x509( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, ): mongodb_multi.load() @@ -219,9 +133,8 @@ def test_mongodb_multi_tls_enable_x509( mongodb_multi.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1000) -@mark.e2e_multi_cluster_tls_with_scram def test_mongodb_multi_tls_automation_config_was_updated( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, ): tester = AutomationConfigTester(KubernetesTester.get_automation_config()) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_x509.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_x509.py new file mode 100644 index 000000000..2c3e9dc31 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_x509.py @@ -0,0 +1,63 @@ +import tempfile + +import kubernetes +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.certs import Certificate, create_multi_cluster_x509_user_cert +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_deploy_mongodb_multi_with_tls_and_authentication(mongodb_multi: MongoDBMulti | MongoDB, namespace: str): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_ops_manager_state_was_updated_correctly(mongodb_multi: MongoDBMulti | MongoDB): + ac_tester = AutomationConfigTester(KubernetesTester.get_automation_config()) + 
ac_tester.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=2) + ac_tester.assert_authentication_mechanism_enabled("MONGODB-X509") + ac_tester.assert_internal_cluster_authentication_enabled() + + +def test_create_mongodb_x509_user( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_x509_user: MongoDBUser, + namespace: str, +): + mongodb_x509_user.assert_reaches_phase(Phase.Updated, timeout=100) + + +def test_x509_user_connectivity( + mongodb_multi: MongoDBMulti | MongoDB, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_issuer: str, + namespace: str, + ca_path: str, +): + with tempfile.NamedTemporaryFile(delete=False, mode="w") as cert_file: + create_multi_cluster_x509_user_cert( + multi_cluster_issuer, namespace, central_cluster_client, path=cert_file.name + ) + tester = mongodb_multi.tester() + tester.assert_x509_authentication(cert_file_name=cert_file.name, tlsCAFile=ca_path) + + +# TODO Replace and use this method to check that certificate rotation after enabling TLS and authentication mechanisms +# keeps the resources reachable and in Running state. +def assert_certificate_rotation(central_cluster_client, mongodb_multi, namespace, certificate_name): + cert = Certificate(name=certificate_name, namespace=namespace) + cert.api = kubernetes.client.CustomObjectsApi(api_client=central_cluster_client) + cert.load() + cert["spec"]["dnsNames"].append("foo") # Append DNS to cert to rotate the certificate + cert.update() + # FIXME the assertions below need to be replaced with a robust check that the agents are ready + # and the TLS certificates are rotated. + mongodb_multi.assert_abandons_phase(Phase.Running, timeout=100) + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_upgrade_downgrade.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_upgrade_downgrade.py new file mode 100644 index 000000000..b85c08789 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_upgrade_downgrade.py @@ -0,0 +1,59 @@ +from kubetester.kubetester import ensure_ent_version, fcv_from_version +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import MongoDBBackgroundTester +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti | MongoDB, custom_mdb_prev_version: str): + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + + +def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): + mdb_health_checker.start() + + +def test_mongodb_multi_upgrade( + mongodb_multi: MongoDBMulti | MongoDB, custom_mdb_prev_version: str, custom_mdb_version: str +): + mongodb_multi.load() + mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_version) + mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) + mongodb_multi.update() + + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + + mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_version)) + + +def test_upgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): 
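+    # A plain connectivity check after the upgrade; the MongoDBBackgroundTester started
+    # in test_start_background_checker keeps probing the deployment in parallel, and its
+    # results are asserted at the end in test_mdb_healthy_throughout_change_version.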
+ tester = mongodb_multi.tester() + tester.assert_connectivity() + + +def test_mongodb_multi_downgrade(mongodb_multi: MongoDBMulti | MongoDB, custom_mdb_prev_version: str): + mongodb_multi.load() + mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_prev_version) + mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) + mongodb_multi.update() + + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + + +def test_downgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_connectivity() + + +def test_mdb_healthy_throughout_change_version( + mdb_health_checker: MongoDBBackgroundTester, +): + mdb_health_checker.assert_healthiness() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_validation.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py similarity index 53% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_validation.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py index d24fecf3c..afd618483 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_validation.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py @@ -1,44 +1,42 @@ import kubernetes -import pytest import yaml from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.operator import Operator -@pytest.mark.e2e_multi_cluster_validation class TestWebhookValidation(KubernetesTester): - def test_deploy_operator(self, multi_cluster_operator: Operator): + def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() - def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient): - resource = yaml.safe_load(open(yaml_fixture("mongodb-multi-cluster.yaml"))) + def test_unique_cluster_names(central_cluster_client: kubernetes.client.ApiClient, fixture: str): + resource = yaml.safe_load(open(yaml_fixture(fixture))) resource["spec"]["clusterSpecList"].append({"clusterName": "kind-e2e-cluster-1", "members": 1}) - self.create_custom_resource_from_object( - self.get_namespace(), + KubernetesTester.create_custom_resource_from_object( + KubernetesTester.get_namespace(), resource, exception_reason="Multiple clusters with the same name (kind-e2e-cluster-1) are not allowed", api_client=central_cluster_client, ) - def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient): - resource = yaml.safe_load(open(yaml_fixture("mongodb-multi-cluster.yaml"))) + def test_only_one_schema(central_cluster_client: kubernetes.client.ApiClient, fixture: str): + resource = yaml.safe_load(open(yaml_fixture(fixture))) resource["spec"]["cloudManager"] = {"configMapRef": {"name": " my-project"}} - self.create_custom_resource_from_object( - self.get_namespace(), + KubernetesTester.create_custom_resource_from_object( + KubernetesTester.get_namespace(), resource, exception_reason="must validate one and only one schema", api_client=central_cluster_client, ) - def test_non_empty_clusterspec_list(self, central_cluster_client: kubernetes.client.ApiClient): - resource = yaml.safe_load(open(yaml_fixture("mongodb-multi-cluster.yaml"))) + def test_non_empty_clusterspec_list(central_cluster_client: 
kubernetes.client.ApiClient, fixture: str): + resource = yaml.safe_load(open(yaml_fixture(fixture))) resource["spec"]["clusterSpecList"] = [] - self.create_custom_resource_from_object( - self.get_namespace(), + KubernetesTester.create_custom_resource_from_object( + KubernetesTester.get_namespace(), resource, exception_reason="ClusterSpecList empty is not allowed, please define at least one cluster", api_client=central_cluster_client, From 4fc56033d3575865766b159e81644f62cabc6975 Mon Sep 17 00:00:00 2001 From: filipcirtog Date: Mon, 17 Nov 2025 11:15:42 +0100 Subject: [PATCH 02/13] bug fixes --- ...> mongodbmulticluster-multi-central-sts-override.yaml} | 0 ...godbmulticluster_multi_cluster_replica_set_scale_up.py | 2 +- ...ongodbmulticluster_multi_cluster_scale_down_cluster.py | 2 +- ...icluster_multi_cluster_scale_up_cluster_new_cluster.py | 2 +- .../mongodbmulticluster_multi_cluster_tls_with_scram.py | 2 +- .../mongodbmulticluster_multi_cluster_validation.py | 8 ++++---- 6 files changed, 8 insertions(+), 8 deletions(-) rename docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/{mongodbmulticluster-central-sts-override.yaml => mongodbmulticluster-multi-central-sts-override.yaml} (100%) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-central-sts-override.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-central-sts-override.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-central-sts-override.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-central-sts-override.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py index bdf324aae..cebe85d8e 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py @@ -75,7 +75,7 @@ def test_deploy_operator(multi_cluster_operator: Operator): @pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): - testhelper.test_deploy_operator(mongodb_multi) + testhelper.test_create_mongodb_multi(mongodb_multi) @pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py index a528ca716..f7f3e9620 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py @@ -74,7 +74,7 @@ def test_statefulsets_have_been_created_correctly( mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient], ): - testhelper.test_create_mongodb_multi(mongodb_multi, 
member_cluster_clients) + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) @pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py index c1b456369..1fd805bbf 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py @@ -106,7 +106,7 @@ def test_re_deploy_operator( def test_add_new_cluster_to_mongodb_multi_resource( mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] ): - testhelper.test_re_deploy_operator(mongodb_multi, member_cluster_clients) + testhelper.test_add_new_cluster_to_mongodb_multi_resource(mongodb_multi, member_cluster_clients) @pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py index bd590c055..43ac7a25c 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py @@ -124,7 +124,7 @@ def test_create_mongodb_user( @skip_if_local @mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram def test_tls_connectivity(mongodb_multi: MongoDBMulti, ca_path: str): - testhelper.test_create_mongodb_user(mongodb_multi, ca_path) + testhelper.test_tls_connectivity(mongodb_multi, ca_path) @skip_if_local diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py index 390bbd939..79a098e9e 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py @@ -13,13 +13,13 @@ @pytest.mark.e2e_mongodbmulticluster_multi_cluster_validation class TestWebhookValidation(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): - testhelper.test_deploy_operator(multi_cluster_operator, MDBM_RESOURCE) + testhelper.TestWebhookValidation.test_deploy_operator(multi_cluster_operator, MDBM_RESOURCE) def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient): - testhelper.test_unique_cluster_names(central_cluster_client, MDBM_RESOURCE) + testhelper.TestWebhookValidation.test_unique_cluster_names(central_cluster_client, MDBM_RESOURCE) def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient): - testhelper.test_only_one_schema(central_cluster_client, MDBM_RESOURCE) + testhelper.TestWebhookValidation.test_only_one_schema(central_cluster_client, MDBM_RESOURCE) def 
test_non_empty_clusterspec_list(self, central_cluster_client: kubernetes.client.ApiClient): - testhelper.test_non_empty_clusterspec_list(central_cluster_client, MDBM_RESOURCE) + testhelper.TestWebhookValidation.test_non_empty_clusterspec_list(central_cluster_client, MDBM_RESOURCE) From d66ea7b421839f85e56c4c549e2785c3488c6007 Mon Sep 17 00:00:00 2001 From: filipcirtog Date: Mon, 17 Nov 2025 12:53:17 +0100 Subject: [PATCH 03/13] bug fixes --- .../authentication/mongodb_custom_roles.py | 2 +- ...lticluster_multi_cluster_backup_restore.py | 403 ++++++++++++++++++ ...er_multi_cluster_backup_restore_no_mesh.py | 8 +- ...bmulticluster_multi_cluster_replica_set.py | 2 +- ...dbmulticluster_multi_cluster_validation.py | 2 +- .../multi_cluster_backup_restore_no_mesh.py | 25 +- ...ticluster_appdb_s3_based_backup_restore.py | 2 +- .../multicluster_om_appdb_no_mesh.py | 2 +- 8 files changed, 422 insertions(+), 24 deletions(-) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py diff --git a/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py index 9665e6169..318a51884 100644 --- a/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py +++ b/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py @@ -211,7 +211,7 @@ def mc_replica_set( mongodb_role_without_empty_strings: ClusterMongoDBRole, third_project: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(find_fixture("mongodb-multi.yaml"), namespace=namespace) + resource = MongoDBMulti.from_yaml(find_fixture("mongodbmulticluster-multi.yaml"), namespace=namespace) if try_load(resource): return resource diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py new file mode 100644 index 000000000..3eadc4b15 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py @@ -0,0 +1,403 @@ +from typing import Dict, List, Optional + +import kubernetes +import kubernetes.client +import pymongo +import pytest +from kubetester import ( + create_or_update_configmap, + create_or_update_secret, + try_load, +) +from kubetester.certs import create_ops_manager_tls_certs +from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.omtester import OMTester +from kubetester.operator import Operator +from kubetester.opsmanager import MongoDBOpsManager +from pytest import fixture, mark +from tests.conftest import ( + wait_for_primary, +) + +from ..shared import multi_cluster_backup_restore as testhelper + +MONGODB_PORT = 30000 +OPLOG_RS_NAME = "my-mongodb-oplog" +BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" +USER_PASSWORD = "/qwerty@!#:" + + +@fixture(scope="module") +def ops_manager_certs( + namespace: str, + multi_cluster_issuer: str, + central_cluster_client: kubernetes.client.ApiClient, +): + return 
create_ops_manager_tls_certs( + multi_cluster_issuer, + namespace, + "om-backup", + secret_name="mdb-om-backup-cert", + # We need the interconnected certificate since we update coreDNS later with that ip -> domain + # because our central cluster is not part of the mesh, but we can access the pods via external IPs. + # Since we are using TLS we need a certificate for a hostname, an IP does not work, hence + # f"om-backup.{namespace}.interconnected" -> IP setup below + additional_domains=[ + "fastdl.mongodb.org", + f"om-backup.{namespace}.interconnected", + ], + api_client=central_cluster_client, + ) + + +def new_om_data_store( + mdb: MongoDB, + id: str, + assignment_enabled: bool = True, + user_name: Optional[str] = None, + password: Optional[str] = None, +) -> Dict: + return { + "id": id, + "uri": mdb.mongo_uri(user_name=user_name, password=password), + "ssl": mdb.is_tls_enabled(), + "assignmentEnabled": assignment_enabled, + } + + +@fixture(scope="module") +def ops_manager( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + custom_version: Optional[str], + custom_appdb_version: str, + ops_manager_certs: str, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBOpsManager: + resource: MongoDBOpsManager = MongoDBOpsManager.from_yaml( + yaml_fixture("om_ops_manager_backup.yaml"), namespace=namespace + ) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource["spec"]["externalConnectivity"] = {"type": "LoadBalancer"} + resource["spec"]["security"] = { + "certsSecretPrefix": "mdb", + "tls": {"ca": multi_cluster_issuer_ca_configmap}, + } + # remove s3 config + del resource["spec"]["backup"]["s3Stores"] + + resource.set_version(custom_version) + resource.set_appdb_version(custom_appdb_version) + resource.allow_mdb_rc_versions() + resource.create_admin_secret(api_client=central_cluster_client) + + try_load(resource) + + return resource + + +@fixture(scope="module") +def oplog_replica_set( + ops_manager, + namespace, + custom_mdb_version: str, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDB: + resource = MongoDB.from_yaml( + yaml_fixture("replica-set-for-om.yaml"), + namespace=namespace, + name=OPLOG_RS_NAME, + ) + + testhelper.create_project_config_map( + om=ops_manager, + project_name="development", + mdb_name=OPLOG_RS_NAME, + client=central_cluster_client, + custom_ca=multi_cluster_issuer_ca_configmap, + ) + + resource.configure(ops_manager, "development") + + resource["spec"]["opsManager"]["configMapRef"]["name"] = OPLOG_RS_NAME + "-config" + resource.set_version(custom_mdb_version) + + resource["spec"]["security"] = {"authentication": {"enabled": True, "modes": ["SCRAM"]}} + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@fixture(scope="module") +def blockstore_replica_set( + ops_manager, + namespace, + custom_mdb_version: str, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDB: + resource = MongoDB.from_yaml( + yaml_fixture("replica-set-for-om.yaml"), + namespace=namespace, + name=BLOCKSTORE_RS_NAME, + ) + + testhelper.create_project_config_map( + om=ops_manager, + project_name="blockstore", + mdb_name=BLOCKSTORE_RS_NAME, + client=central_cluster_client, + custom_ca=multi_cluster_issuer_ca_configmap, + ) + + resource.configure(ops_manager, "blockstore") + + resource.set_version(custom_mdb_version) + resource.api = 
kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@fixture(scope="module") +def blockstore_user( + namespace, + blockstore_replica_set: MongoDB, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBUser: + """Creates a password secret and then the user referencing it""" + resource = MongoDBUser.from_yaml(yaml_fixture("scram-sha-user-backing-db.yaml"), namespace=namespace) + resource["spec"]["mongodbResourceRef"]["name"] = blockstore_replica_set.name + + print(f"\nCreating password for MongoDBUser {resource.name} in secret/{resource.get_secret_name()} ") + create_or_update_secret( + KubernetesTester.get_namespace(), + resource.get_secret_name(), + { + "password": USER_PASSWORD, + }, + api_client=central_cluster_client, + ) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@fixture(scope="module") +def oplog_user( + namespace, + oplog_replica_set: MongoDB, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBUser: + """Creates a password secret and then the user referencing it""" + resource = MongoDBUser.from_yaml( + yaml_fixture("scram-sha-user-backing-db.yaml"), + namespace=namespace, + name="mms-user-2", + ) + resource["spec"]["mongodbResourceRef"]["name"] = oplog_replica_set.name + resource["spec"]["passwordSecretKeyRef"]["name"] = "mms-user-2-password" + resource["spec"]["username"] = "mms-user-2" + + print(f"\nCreating password for MongoDBUser {resource.name} in secret/{resource.get_secret_name()} ") + create_or_update_secret( + KubernetesTester.get_namespace(), + resource.get_secret_name(), + { + "password": USER_PASSWORD, + }, + api_client=central_cluster_client, + ) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore +class TestOpsManagerCreation: + """ + name: Ops Manager successful creation with backup and oplog stores enabled + description: | + Creates an Ops Manager instance with backup enabled. 
The OM is expected to get to 'Pending' state + eventually as it will wait for oplog db to be created + """ + + def test_create_om( + self, + ops_manager: MongoDBOpsManager, + ): + testhelper.TestOpsManagerCreation.test_create_om(ops_manager) + + def test_daemon_statefulset( + self, + ops_manager: MongoDBOpsManager, + ): + testhelper.TestOpsManagerCreation.test_daemon_statefulset(ops_manager) + + def test_backup_daemon_services_created( + self, + namespace, + central_cluster_client: kubernetes.client.ApiClient, + ): + testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore +class TestBackupDatabasesAdded: + """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to + running state""" + + def test_backup_mdbs_created( + self, + oplog_replica_set: MongoDB, + blockstore_replica_set: MongoDB, + ): + testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(oplog_replica_set, blockstore_replica_set) + + def test_oplog_user_created(self, oplog_user: MongoDBUser): + testhelper.TestBackupDatabasesAdded.test_oplog_user_created(oplog_user) + + def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): + testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(ops_manager) + + def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): + testhelper.TestBackupDatabasesAdded.test_fix_om(ops_manager, oplog_user) + + +class TestBackupForMongodb: + @fixture(scope="module") + def base_url( + self, + ops_manager: MongoDBOpsManager, + ) -> str: + """ + The base_url makes OM accessible from member clusters via a special interconnected dns address. + This address only works for member clusters. + """ + interconnected_field = f"https://om-backup.{ops_manager.namespace}.interconnected" + new_address = f"{interconnected_field}:8443" + + return new_address + + @fixture(scope="module") + def project_one( + self, + ops_manager: MongoDBOpsManager, + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + base_url: str, + ) -> OMTester: + return ops_manager.get_om_tester( + project_name=f"{namespace}-project-one", + api_client=central_cluster_client, + base_url=base_url, + ) + + @fixture(scope="function") + def mdb_client(self, mongodb_multi_one: MongoDBMulti): + return pymongo.MongoClient( + mongodb_multi_one.tester(port=MONGODB_PORT).cnx_string, + **mongodb_multi_one.tester(port=MONGODB_PORT).default_opts, + readPreference="primary", # let's read from the primary and not stale data from the secondary + ) + + @fixture(scope="function") + def mongodb_multi_one_collection(self, mdb_client): + + # Ensure primary is available before proceeding + wait_for_primary(mdb_client) + + return mdb_client["testdb"]["testcollection"] + + @fixture(scope="module") + def mongodb_multi_one( + self, + ops_manager: MongoDBOpsManager, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: List[str], + base_url, + custom_mdb_version: str, + ) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi.yaml"), + "multi-replica-set-one", + namespace, + # the project configmap should be created in the central cluster. 
+ ).configure(ops_manager, f"{namespace}-project-one", api_client=central_cluster_client) + + resource.set_version(ensure_ent_version(custom_mdb_version)) + resource["spec"]["clusterSpecList"] = [ + {"clusterName": member_cluster_names[0], "members": 2}, + {"clusterName": member_cluster_names[1], "members": 1}, + {"clusterName": member_cluster_names[2], "members": 2}, + ] + + # creating a cluster with backup should work with custom ports + resource["spec"].update({"additionalMongodConfig": {"net": {"port": MONGODB_PORT}}}) + + resource.configure_backup(mode="enabled") + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + data = KubernetesTester.read_configmap( + namespace, "multi-replica-set-one-config", api_client=central_cluster_client + ) + KubernetesTester.delete_configmap(namespace, "multi-replica-set-one-config", api_client=central_cluster_client) + data["baseUrl"] = base_url + data["sslMMSCAConfigMap"] = multi_cluster_issuer_ca_configmap + create_or_update_configmap( + namespace, + "multi-replica-set-one-config", + data, + api_client=central_cluster_client, + ) + + return resource.update() + + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore + def test_setup_om_connection( + self, + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + ): + testhelper.TestBackupForMongodb.test_setup_om_connection( + ops_manager, central_cluster_client, member_cluster_clients + ) + + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(mongodb_multi_one) + + @skip_if_local + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore + @pytest.mark.flaky(reruns=100, reruns_delay=6) + def test_add_test_data(self, mongodb_multi_one_collection): + testhelper.TestBackupForMongodb.test_add_test_data(mongodb_multi_one_collection) + + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore + def test_mdb_backed_up(self, project_one: OMTester): + testhelper.TestBackupForMongodb.test_mdb_backed_up(project_one) + + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore + def test_change_mdb_data(self, mongodb_multi_one_collection): + testhelper.TestBackupForMongodb.test_change_mdb_data(mongodb_multi_one_collection) + + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore + def test_pit_restore(self, project_one: OMTester): + testhelper.TestBackupForMongodb.test_pit_restore(project_one) + + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore + def test_data_got_restored(self, mongodb_multi_one_collection, mdb_client): + testhelper.TestBackupForMongodb.test_data_got_restored(mongodb_multi_one_collection, mdb_client) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py index 388288eb8..45c74a11c 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py @@ -305,16 +305,16 @@ def test_backup_mdbs_created( oplog_replica_set: MongoDB, blockstore_replica_set: MongoDB, ): - 
testhelper.TestOpsManagerCreation.test_backup_mdbs_created(oplog_replica_set, blockstore_replica_set) + testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(oplog_replica_set, blockstore_replica_set) def test_oplog_user_created(self, oplog_user: MongoDBUser): - testhelper.TestOpsManagerCreation.test_oplog_user_created(oplog_user) + testhelper.TestBackupDatabasesAdded.test_oplog_user_created(oplog_user) def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): - testhelper.TestOpsManagerCreation.test_om_failed_oplog_no_user_ref(ops_manager) + testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(ops_manager) def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): - testhelper.TestOpsManagerCreation.test_fix_om(ops_manager, oplog_user) + testhelper.TestBackupDatabasesAdded.test_fix_om(ops_manager, oplog_user) class TestBackupForMongodb: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py index d9f07372e..607250b58 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py @@ -27,7 +27,7 @@ def mongodb_multi( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodbmutlicluster-multi-central-sts-override.yaml"), + yaml_fixture("mongodbmulticluster-multi-central-sts-override.yaml"), MDB_RESOURCE, namespace, ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py index 79a098e9e..be0fbd6cd 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py @@ -13,7 +13,7 @@ @pytest.mark.e2e_mongodbmulticluster_multi_cluster_validation class TestWebhookValidation(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): - testhelper.TestWebhookValidation.test_deploy_operator(multi_cluster_operator, MDBM_RESOURCE) + testhelper.TestWebhookValidation.test_deploy_operator(multi_cluster_operator) def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient): testhelper.TestWebhookValidation.test_unique_cluster_names(central_cluster_client, MDBM_RESOURCE) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py index 68bbaa38d..b4ec6752c 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py @@ -81,7 +81,6 @@ class TestOpsManagerCreation: """ def test_create_om( - self, ops_manager: MongoDBOpsManager, ): ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() @@ -96,7 +95,6 @@ def test_create_om( ) def test_daemon_statefulset( 
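# [Editor's aside, not part of the patch] This hunk and the ones below drop
# `self` so the helper methods become plain functions that the per-CRD
# wrapper files can call through the class attribute, e.g.
# testhelper.TestOpsManagerCreation.test_create_om(ops_manager). That works
# because in Python 3 a function defined in a class body and accessed via
# the class (never an instance) is just an ordinary function; no
# @staticmethod is required:
#
#     class Helpers:
#         def check(resource):            # note: no self
#             return resource is not None
#
#     Helpers.check("rs")                 # fine: plain function call
#     # Helpers().check("rs")            # would fail: instance binding passes self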
- self, ops_manager: MongoDBOpsManager, ): def stateful_set_becomes_ready(): @@ -112,7 +110,6 @@ def stateful_set_becomes_ready(): ) def test_backup_daemon_services_created( - self, namespace, central_cluster_client: kubernetes.client.ApiClient, ): @@ -129,7 +126,6 @@ class TestBackupDatabasesAdded: running state""" def test_backup_mdbs_created( - self, oplog_replica_set: MongoDB, blockstore_replica_set: MongoDB, ): @@ -137,10 +133,10 @@ def test_backup_mdbs_created( oplog_replica_set.assert_reaches_phase(Phase.Running) blockstore_replica_set.assert_reaches_phase(Phase.Running) - def test_oplog_user_created(self, oplog_user: MongoDBUser): + def test_oplog_user_created(oplog_user: MongoDBUser): oplog_user.assert_reaches_phase(Phase.Updated) - def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): + def test_om_failed_oplog_no_user_ref(ops_manager: MongoDBOpsManager): """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" ops_manager.backup_status().assert_reaches_phase( Phase.Failed, @@ -148,7 +144,7 @@ def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): "must be specified using 'mongodbUserRef'", ) - def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): + def test_fix_om(ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): ops_manager.load() ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} ops_manager.update() @@ -165,7 +161,6 @@ def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): class TestBackupForMongodb: def test_setup_om_connection( - self, replica_set_external_hosts: List[Tuple[str, str]], ops_manager: MongoDBOpsManager, central_cluster_client: kubernetes.client.ApiClient, @@ -206,11 +201,11 @@ def test_setup_om_connection( ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address ops_manager.update() - def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti | MongoDB): + def test_mongodb_multi_one_running_state( mongodb_multi_one: MongoDBMulti | MongoDB): # we might fail connection in the beginning since we set a custom dns in coredns mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1500) - def test_add_test_data(self, mongodb_multi_one_collection): + def test_add_test_data(mongodb_multi_one_collection): max_attempts = 100 while max_attempts > 0: try: @@ -221,16 +216,16 @@ def test_add_test_data(self, mongodb_multi_one_collection): max_attempts -= 1 time.sleep(6) - def test_mdb_backed_up(self, project_one: OMTester): + def test_mdb_backed_up(project_one: OMTester): project_one.wait_until_backup_snapshots_are_ready(expected_count=1) - def test_change_mdb_data(self, mongodb_multi_one_collection): + def test_change_mdb_data(mongodb_multi_one_collection): now_millis = time_to_millis(datetime.datetime.now()) print("\nCurrent time (millis): {}".format(now_millis)) time.sleep(30) mongodb_multi_one_collection.insert_one({"foo": "bar"}) - def test_pit_restore(self, project_one: OMTester): + def test_pit_restore(project_one: OMTester): now_millis = time_to_millis(datetime.datetime.now()) print("\nCurrent time (millis): {}".format(now_millis)) @@ -240,14 +235,14 @@ def test_pit_restore(self, project_one: OMTester): project_one.create_restore_job_pit(pit_millis) - def test_mdb_ready(self, mongodb_multi_one: MongoDBMulti | MongoDB): + def test_mdb_ready(mongodb_multi_one: MongoDBMulti | MongoDB): # Note: that we are not waiting for the restore 
jobs to get finished as PIT restore jobs get FINISHED status # right away. # But the agent might still do work on the cluster, so we need to wait for that to happen. mongodb_multi_one.assert_reaches_phase(Phase.Pending) mongodb_multi_one.assert_reaches_phase(Phase.Running) - def test_data_got_restored(self, mongodb_multi_one_collection): + def test_data_got_restored( mongodb_multi_one_collection): assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=900) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py index 0d573066c..502d3b7ac 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py @@ -184,7 +184,7 @@ def mongodb_multi_one( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), + yaml_fixture("mongodbmulticluster-multi.yaml"), "multi-replica-set-one", namespace, # the project configmap should be created in the central cluster. diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py index 34d05184f..5c64acb2f 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py @@ -424,7 +424,7 @@ def mongodb_multi( ops_manager: MongoDBOpsManager, multi_cluster_issuer_ca_configmap: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) resource.set_version(ensure_ent_version(custom_mdb_version)) resource.set_architecture_annotation() resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) From c9eed832f09a0506435ed83a21a983917a3008d8 Mon Sep 17 00:00:00 2001 From: filipcirtog Date: Mon, 17 Nov 2025 16:00:00 +0100 Subject: [PATCH 04/13] rename tests --- .evergreen-tasks.yml | 9 +++++-- .evergreen.yml | 8 +++--- ...icluster_appdb_s3_based_backup_restore.py} | 4 +-- ...r_mongodbmulticluster_om_appdb_no_mesh.py} | 26 +++++++++---------- 4 files changed, 26 insertions(+), 21 deletions(-) rename docker/mongodb-kubernetes-tests/tests/multicluster_appdb/{multicluster_appdb_s3_based_backup_restore.py => multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py} (98%) rename docker/mongodb-kubernetes-tests/tests/multicluster_om/{multicluster_om_appdb_no_mesh.py => multicluster_mongodbmulticluster_om_appdb_no_mesh.py} (96%) diff --git a/.evergreen-tasks.yml b/.evergreen-tasks.yml index 3a8a4825d..fea6058a1 100644 --- a/.evergreen-tasks.yml +++ b/.evergreen-tasks.yml @@ -1013,7 +1013,7 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_appdb_s3_based_backup_restore + - name: e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore tags: [ "patch-run" ] commands: - func: e2e_test @@ -1174,7 +1174,12 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_om_appdb_no_mesh + - name: e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: 
e2e_mongodb_multi_cluster_om_appdb_no_mesh tags: [ "patch-run" ] commands: - func: e2e_test diff --git a/.evergreen.yml b/.evergreen.yml index 2aca80943..b1e71a3b4 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -944,11 +944,11 @@ task_groups: - e2e_multi_cluster_om_validation - e2e_multi_cluster_appdb - e2e_multi_cluster_appdb_cleanup - - e2e_multi_cluster_appdb_s3_based_backup_restore + - e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore - e2e_multi_cluster_appdb_disaster_recovery - e2e_multi_cluster_appdb_disaster_recovery_force_reconfigure - e2e_multi_cluster_om_networking_clusterwide - - e2e_multi_cluster_om_appdb_no_mesh + - e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh # Reused OM tests with AppDB Multi-Cluster topology - e2e_appdb_tls_operator_upgrade_v1_32_to_mck - e2e_om_appdb_flags_and_config @@ -999,11 +999,11 @@ task_groups: - e2e_multi_cluster_om_validation - e2e_multi_cluster_appdb - e2e_multi_cluster_appdb_cleanup - - e2e_multi_cluster_appdb_s3_based_backup_restore + - e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore - e2e_multi_cluster_appdb_disaster_recovery - e2e_multi_cluster_appdb_disaster_recovery_force_reconfigure - e2e_multi_cluster_om_networking_clusterwide - - e2e_multi_cluster_om_appdb_no_mesh + - e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh # Reused OM tests with AppDB Multi-Cluster topology - e2e_om_appdb_flags_and_config - e2e_om_appdb_upgrade diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py similarity index 98% rename from docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py rename to docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py index 502d3b7ac..ccae3ba74 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py @@ -94,7 +94,7 @@ def ops_manager( @mark.usefixtures("multi_cluster_operator") -@mark.e2e_multi_cluster_appdb_s3_based_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore class TestOpsManagerCreation: """ name: Ops Manager successful creation with backup and oplog stores enabled @@ -150,7 +150,7 @@ def test_om_s3_stores( om_tester.assert_oplog_s3_stores([{"id": S3_OPLOG_NAME, "s3RegionOverride": AWS_REGION}]) -@mark.e2e_multi_cluster_appdb_s3_based_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore class TestBackupForMongodb: @fixture(scope="module") def project_one( diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_mongodbmulticluster_om_appdb_no_mesh.py similarity index 96% rename from docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py rename to docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_mongodbmulticluster_om_appdb_no_mesh.py index 5c64acb2f..46f233601 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py +++ 
b/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_mongodbmulticluster_om_appdb_no_mesh.py @@ -127,7 +127,7 @@ def s3_bucket_oplog(namespace: str, aws_s3_client: AwsS3Client) -> str: return next(create_s3_bucket_oplog(namespace, aws_s3_client, api_client=get_central_cluster_client())) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_configure_dns(disable_istio): host_mappings = [ ( @@ -198,12 +198,12 @@ def test_configure_dns(disable_istio): ) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_disable_istio(disable_istio): logger.info("Istio disabled") -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_configure_nginx(namespace: str): cluster_client = get_central_cluster_client() @@ -535,12 +535,12 @@ def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, cli create_or_update_configmap(om.namespace, name, data, client) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_deploy_operator(multi_cluster_operator_with_monitored_appdb: Operator): multi_cluster_operator_with_monitored_appdb.assert_is_running() -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_deploy_ops_manager(ops_manager: MongoDBOpsManager): ops_manager.update() ops_manager.om_status().assert_reaches_phase(Phase.Running) @@ -549,26 +549,26 @@ def test_deploy_ops_manager(ops_manager: MongoDBOpsManager): ops_manager.assert_appdb_monitoring_group_was_created() -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_create_mongodb_multi(server_certs: str, mongodb_multi: MongoDBMulti): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2400, ignore_errors=True) @skip_if_local -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh @pytest.mark.flaky(reruns=100, reruns_delay=6) def test_add_test_data(mongodb_multi_collection): mongodb_multi_collection.insert_one(TEST_DATA) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_mdb_backed_up(ops_manager: MongoDBOpsManager): ops_manager.get_om_tester(project_name="mongodb").wait_until_backup_snapshots_are_ready(expected_count=1) @skip_if_local -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_change_mdb_data(mongodb_multi_collection): now_millis = time_to_millis(datetime.datetime.now(tz=datetime.UTC)) print("\nCurrent time (millis): {}".format(now_millis)) @@ -576,7 +576,7 @@ def test_change_mdb_data(mongodb_multi_collection): mongodb_multi_collection.insert_one({"foo": "bar"}) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_pit_restore(ops_manager: MongoDBOpsManager): now_millis = time_to_millis(datetime.datetime.now(tz=datetime.UTC)) print("\nCurrent time (millis): {}".format(now_millis)) @@ -588,7 +588,7 @@ def test_pit_restore(ops_manager: MongoDBOpsManager): ops_manager.get_om_tester(project_name="mongodb").create_restore_job_pit(pit_millis) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_mdb_ready(mongodb_multi: MongoDBMulti): # Note: that we are not 
waiting for the restore jobs to get finished as PIT restore jobs get FINISHED status # right away. @@ -598,7 +598,7 @@ def test_mdb_ready(mongodb_multi: MongoDBMulti): @skip_if_local -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_data_got_restored(mongodb_multi_collection): assert_data_got_restored(TEST_DATA, mongodb_multi_collection, timeout=600) @@ -609,7 +609,7 @@ def time_to_millis(date_time) -> int: return pit_millis -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_telemetry_configmap(namespace: str): config = KubernetesTester.read_configmap(namespace, TELEMETRY_CONFIGMAP_NAME) From a95ac8f8e3e59e38bb59ee75695de0fb8944f7a8 Mon Sep 17 00:00:00 2001 From: filipcirtog Date: Mon, 17 Nov 2025 16:04:27 +0100 Subject: [PATCH 05/13] rename test --- .evergreen-tasks.yml | 2 +- .evergreen.yml | 2 +- ...es.py => mongodbmulticluster_custom_roles.py} | 16 ++++++++-------- 3 files changed, 10 insertions(+), 10 deletions(-) rename docker/mongodb-kubernetes-tests/tests/authentication/{mongodb_custom_roles.py => mongodbmulticluster_custom_roles.py} (97%) diff --git a/.evergreen-tasks.yml b/.evergreen-tasks.yml index fea6058a1..7d1cd8734 100644 --- a/.evergreen-tasks.yml +++ b/.evergreen-tasks.yml @@ -250,7 +250,7 @@ tasks: commands: - func: "e2e_test" - - name: e2e_mongodb_custom_roles + - name: e2e_mongodbmulticluster_custom_roles tags: [ "patch-run" ] commands: - func: "e2e_test" diff --git a/.evergreen.yml b/.evergreen.yml index b1e71a3b4..97d62af0a 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -917,7 +917,7 @@ task_groups: - e2e_tls_x509_configure_all_options_sc - e2e_tls_x509_sc - e2e_meko_mck_upgrade - - e2e_mongodb_custom_roles + - e2e_mongodbmulticluster_custom_roles - e2e_sharded_cluster_oidc_m2m_group - e2e_sharded_cluster_oidc_m2m_user - e2e_mongodbmulticluster_multi_cluster_oidc_m2m_group diff --git a/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py similarity index 97% rename from docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py rename to docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py index 318a51884..ddafc0dae 100644 --- a/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py +++ b/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py @@ -234,7 +234,7 @@ def mc_replica_set( return resource -@mark.e2e_mongodb_custom_roles +@mark.e2e_mongodbmulticluster_custom_roles def test_create_resources( mongodb_role_with_empty_strings: ClusterMongoDBRole, mongodb_role_without_empty_strings: ClusterMongoDBRole, @@ -254,7 +254,7 @@ def test_create_resources( mc_replica_set.assert_reaches_phase(Phase.Running, timeout=400) -@mark.e2e_mongodb_custom_roles +@mark.e2e_mongodbmulticluster_custom_roles def test_automation_config_has_roles( replica_set: MongoDB, sharded_cluster: MongoDB, @@ -306,7 +306,7 @@ def assert_expected_roles( ) -@mark.e2e_mongodb_custom_roles +@mark.e2e_mongodbmulticluster_custom_roles def test_change_inherited_role( replica_set: MongoDB, sharded_cluster: MongoDB, @@ -328,7 +328,7 @@ def is_role_changed(ac_tester: AutomationConfigTester): wait_until(lambda: is_role_changed(mc_replica_set.get_automation_config_tester())) -@mark.e2e_mongodb_custom_roles +@mark.e2e_mongodbmulticluster_custom_roles def 
test_deleting_role_does_not_remove_access( replica_set: MongoDB, sharded_cluster: MongoDB, @@ -355,7 +355,7 @@ def test_deleting_role_does_not_remove_access( mc_replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=2) -@mark.e2e_mongodb_custom_roles +@mark.e2e_mongodbmulticluster_custom_roles def test_removing_role_from_resources(replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti): sharded_cluster["spec"]["security"]["roleRefs"] = None sharded_cluster.update() @@ -367,12 +367,12 @@ def test_removing_role_from_resources(replica_set: MongoDB, sharded_cluster: Mon wait_until(lambda: len(mc_replica_set.get_automation_config_tester().automation_config["roles"]) == 0, timeout=120) -@mark.e2e_mongodb_custom_roles +@mark.e2e_mongodbmulticluster_custom_roles def test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles): multi_cluster_operator_no_cluster_mongodb_roles.assert_is_running() -@mark.e2e_mongodb_custom_roles +@mark.e2e_mongodbmulticluster_custom_roles def test_replicaset_is_failed(replica_set: MongoDB): replica_set.assert_reaches_phase( Phase.Failed, @@ -380,7 +380,7 @@ def test_replicaset_is_failed(replica_set: MongoDB): ) -@mark.e2e_mongodb_custom_roles +@mark.e2e_mongodbmulticluster_custom_roles def test_replicaset_is_reconciled_without_rolerefs(replica_set: MongoDB): replica_set["spec"]["security"]["roleRefs"] = None replica_set.update() From 186f5d0fc4541b1ef5f09d0f8ec477f39d5026f0 Mon Sep 17 00:00:00 2001 From: filipcirtog Date: Mon, 17 Nov 2025 16:09:50 +0100 Subject: [PATCH 06/13] lint fix --- .../mongodbmulticluster_multi_cluster_recover_clusterwide.py | 3 ++- .../mongodbmulticluster_multi_cluster_tls_with_x509.py | 4 +--- .../shared/multi_cluster_backup_restore_no_mesh.py | 4 ++-- .../tests/multicluster/shared/multi_cluster_cli_recover.py | 1 + .../shared/multi_cluster_recover_network_partition.py | 3 ++- .../shared/multi_cluster_scale_up_cluster_new_cluster.py | 1 + 6 files changed, 9 insertions(+), 7 deletions(-) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py index 76e3006da..aec36d2cb 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py @@ -13,10 +13,11 @@ _install_multi_cluster_operator, run_kube_config_creation_tool, ) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME, OPERATOR_NAME from tests.multicluster.conftest import ( cluster_spec_list, ) -from tests.constants import MULTI_CLUSTER_OPERATOR_NAME, OPERATOR_NAME + from ..shared import multi_cluster_recover_clusterwide as testhelper MDB_RESOURCE = "multi-replica-set" diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py index 312c518dd..57d3a4866 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py +++ 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py @@ -120,9 +120,7 @@ def mongodb_multi( @fixture(scope="module") def mongodb_x509_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: - resource = MongoDBUser.from_yaml( - yaml_fixture("mongodb-x509-user.yaml"), "multi-replica-set-x509-user", namespace - ) + resource = MongoDBUser.from_yaml(yaml_fixture("mongodb-x509-user.yaml"), "multi-replica-set-x509-user", namespace) resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py index b4ec6752c..2892fcf56 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py @@ -201,7 +201,7 @@ def test_setup_om_connection( ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address ops_manager.update() - def test_mongodb_multi_one_running_state( mongodb_multi_one: MongoDBMulti | MongoDB): + def test_mongodb_multi_one_running_state(mongodb_multi_one: MongoDBMulti | MongoDB): # we might fail connection in the beginning since we set a custom dns in coredns mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1500) @@ -242,7 +242,7 @@ def test_mdb_ready(mongodb_multi_one: MongoDBMulti | MongoDB): mongodb_multi_one.assert_reaches_phase(Phase.Pending) mongodb_multi_one.assert_reaches_phase(Phase.Running) - def test_data_got_restored( mongodb_multi_one_collection): + def test_data_got_restored(mongodb_multi_one_collection): assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=900) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py index 7fdb9bdc1..5116d323d 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py @@ -11,6 +11,7 @@ ) from tests.constants import MULTI_CLUSTER_OPERATOR_NAME + def test_deploy_operator( install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], member_cluster_names: List[str], diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py index 0b6c82fc4..8e25bf490 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py @@ -13,10 +13,11 @@ get_member_cluster_api_client, run_multi_cluster_recovery_tool, ) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME from tests.multicluster.conftest import ( create_service_entries_objects, ) -from tests.constants import MULTI_CLUSTER_OPERATOR_NAME + FAILED_MEMBER_CLUSTER_NAME = "kind-e2e-cluster-3" diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py 
b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py index 1edf35056..575ded0d0 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py @@ -12,6 +12,7 @@ from tests.conftest import run_kube_config_creation_tool from tests.constants import MULTI_CLUSTER_OPERATOR_NAME + def test_deploy_operator( install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], member_cluster_names: List[str], From 3c654432ef136cbaba4201849da47e20174daa4e Mon Sep 17 00:00:00 2001 From: filipcirtog Date: Mon, 17 Nov 2025 16:46:25 +0100 Subject: [PATCH 07/13] test renaming --- .evergreen-tasks.yml | 2 +- .evergreen.yml | 6 +++--- ...multicluster_appdb_s3_based_backup_restore.py | 2 +- ...y => meko_mongodbmulticluster_mck_upgrade.py} | 16 ++++++++-------- 4 files changed, 13 insertions(+), 13 deletions(-) rename docker/mongodb-kubernetes-tests/tests/upgrades/{meko_mck_upgrade.py => meko_mongodbmulticluster_mck_upgrade.py} (93%) diff --git a/.evergreen-tasks.yml b/.evergreen-tasks.yml index 7d1cd8734..fdd5f3fdd 100644 --- a/.evergreen-tasks.yml +++ b/.evergreen-tasks.yml @@ -189,7 +189,7 @@ tasks: commands: - func: "e2e_test" - - name: e2e_meko_mck_upgrade + - name: e2e_mongodbmulticluster_meko_mck_upgrade tags: [ "patch-run" ] commands: - func: "e2e_test" diff --git a/.evergreen.yml b/.evergreen.yml index 97d62af0a..f8e14c49f 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -820,7 +820,7 @@ task_groups: - e2e_operator_clusterwide - e2e_operator_multi_namespaces - e2e_appdb_tls_operator_upgrade_v1_32_to_mck - - e2e_meko_mck_upgrade + - e2e_mongodbmulticluster_meko_mck_upgrade <<: *teardown_group # e2e_operator_race_with_telemetry_task_group includes the tests for testing the operator with race detector enabled @@ -848,7 +848,7 @@ task_groups: - e2e_operator_clusterwide - e2e_operator_multi_namespaces - e2e_appdb_tls_operator_upgrade_v1_32_to_mck - - e2e_meko_mck_upgrade + - e2e_mongodbmulticluster_meko_mck_upgrade <<: *teardown_group - name: e2e_multi_cluster_kind_task_group @@ -916,7 +916,7 @@ task_groups: - e2e_tls_sc_additional_certs - e2e_tls_x509_configure_all_options_sc - e2e_tls_x509_sc - - e2e_meko_mck_upgrade + - e2e_mongodbmulticluster_meko_mck_upgrade - e2e_mongodbmulticluster_custom_roles - e2e_sharded_cluster_oidc_m2m_group - e2e_sharded_cluster_oidc_m2m_user diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py index ccae3ba74..51882dbf9 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py @@ -53,7 +53,7 @@ def multi_cluster_s3_replica_set( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-cluster.yaml"), "multi-replica-set", namespace + yaml_fixture("mongodbmulticluster-multi-cluster.yaml"), "multi-replica-set", namespace ).configure(ops_manager, "s3metadata", api_client=central_cluster_client) resource["spec"]["clusterSpecList"] = cluster_spec_list(appdb_member_cluster_names, [1, 2]) diff 
--git a/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mck_upgrade.py b/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mongodbmulticluster_mck_upgrade.py similarity index 93% rename from docker/mongodb-kubernetes-tests/tests/upgrades/meko_mck_upgrade.py rename to docker/mongodb-kubernetes-tests/tests/upgrades/meko_mongodbmulticluster_mck_upgrade.py index 3520b90cc..41b4c0dfb 100644 --- a/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mck_upgrade.py +++ b/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mongodbmulticluster_mck_upgrade.py @@ -54,7 +54,7 @@ def replica_set( ) -> MongoDB: if is_multi_cluster(): resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-cluster.yaml"), + yaml_fixture("mongodbmulticluster-multi-cluster.yaml"), "multi-replica-set", namespace, ) @@ -100,26 +100,26 @@ def replica_set( # Installs the latest officially released version of MEKO, from Quay -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_install_latest_official_operator(official_meko_operator: Operator, namespace: str): official_meko_operator.assert_is_running() # Dumping deployments in logs ensures we are using the correct operator version log_deployments_info(namespace) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_install_replicaset(replica_set: MongoDB): replica_set.assert_reaches_phase(phase=Phase.Running, timeout=1000 if is_multi_cluster() else 600) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_downscale_latest_official_operator(namespace: str): deployment_name = LEGACY_MULTI_CLUSTER_OPERATOR_NAME if is_multi_cluster() else LEGACY_OPERATOR_NAME downscale_operator_deployment(deployment_name, namespace) # Upgrade to MCK -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_upgrade_operator( namespace: str, operator_installation_config, @@ -151,19 +151,19 @@ def test_upgrade_operator( log_deployments_info(namespace) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_replicaset_reconciled(replica_set: MongoDB): replica_set.assert_abandons_phase(phase=Phase.Running, timeout=300) replica_set.assert_reaches_phase(phase=Phase.Running, timeout=800) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_uninstall_latest_official_operator(namespace: str): helm_uninstall(LEGACY_MULTI_CLUSTER_OPERATOR_NAME if is_multi_cluster() else LEGACY_OPERATOR_NAME) log_deployments_info(namespace) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_operator_still_running(namespace: str, central_cluster_client: client.ApiClient, member_cluster_names): operator_name = MULTI_CLUSTER_OPERATOR_NAME if is_multi_cluster() else OPERATOR_NAME operator_instance = Operator( From a059392bbc4d166d0b5f23b2aa04008fefd34439 Mon Sep 17 00:00:00 2001 From: filipcirtog Date: Mon, 17 Nov 2025 17:38:51 +0100 Subject: [PATCH 08/13] bug fix --- ...godbmulticluster_multi_2_cluster_clusterwide_replicaset.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py index 0bfcc4724..0a4131db8 100644 --- 
a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py @@ -171,7 +171,7 @@ def test_create_namespaces( evergreen_task_id: str, multi_cluster_operator_installation_config: Dict[str, str], ): - testhelper.test_create_kube_config_file( + testhelper.test_create_namespaces( namespace, mdba_ns, mdbb_ns, @@ -195,7 +195,7 @@ def test_prepare_namespace( mdba_ns: str, mdbb_ns: str, ): - testhelper.test_deploy_operator( + testhelper.test_prepare_namespace( multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns ) From a9981779e4e00e7fd394c70c90b13c36327a30d4 Mon Sep 17 00:00:00 2001 From: filipcirtog Date: Tue, 18 Nov 2025 10:36:09 +0100 Subject: [PATCH 09/13] bug fix --- .../mongodbmulticluster_multi_cluster_ldap.py | 2 +- .../mongodbmulticluster_multi_cluster_reconcile_races.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py index d4d6aa205..bdd3f69fa 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py @@ -240,7 +240,7 @@ def test_disable_agent_auth(mongodb_multi: MongoDBMulti): @skip_if_static_containers @mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_mongodb_multi_connectivity_with_no_auth(mongodb_multi: MongoDBMulti): - testhelper.test_distest_mongodb_multi_connectivity_with_no_authable_agent_auth(mongodb_multi) + testhelper.test_mongodb_multi_connectivity_with_no_auth(mongodb_multi) @skip_if_static_containers diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py index 19767cc06..b8e81317e 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py @@ -106,4 +106,4 @@ def test_pod_logs_race_after_restart(multi_cluster_operator: Operator): @pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry def test_telemetry_configmap(namespace: str): - testhelper.test_pod_logs_race_after_restart(namespace) + testhelper.test_telemetry_configmap(namespace) From 74d636bfefcd22273b3649ff9630ecec4498d02f Mon Sep 17 00:00:00 2001 From: filipcirtog Date: Wed, 26 Nov 2025 11:51:51 +0100 Subject: [PATCH 10/13] feedback improvements --- .evergreen-tasks.yml | 2 +- .evergreen.yml | 2 +- .../mongodbmulticluster_custom_roles.py | 176 +++----------- .../authentication/shared/custom_roles.py | 225 ++++++++++++++++++ ...lticluster_multi_cluster_backup_restore.py | 28 +-- ...er_multi_cluster_backup_restore_no_mesh.py | 30 +-- ...lticluster_multi_cluster_oidc_m2m_group.py | 8 +- ...ulticluster_multi_cluster_oidc_m2m_user.py | 10 +- ...ticluster_multi_cluster_reconcile_races.py | 109 
--------- ...icluster_multi_cluster_scale_up_cluster.py | 6 +- ...dbmulticluster_multi_cluster_validation.py | 13 +- .../multi_cluster_reconcile_races.py | 80 +++++-- .../shared/multi_cluster_backup_restore.py | 20 +- .../multi_cluster_backup_restore_no_mesh.py | 25 +- .../shared/multi_cluster_oidc_m2m_group.py | 31 ++- .../shared/multi_cluster_oidc_m2m_user.py | 36 ++- .../shared/multi_cluster_scale_up_cluster.py | 4 +- .../shared/multi_cluster_validation.py | 20 +- ...ticluster_appdb_s3_based_backup_restore.py | 61 +---- .../multicluster_appdb/shared/__init__.py | 0 ...ticluster_appdb_s3_based_backup_restore.py | 125 ++++++++++ 21 files changed, 572 insertions(+), 439 deletions(-) create mode 100644 docker/mongodb-kubernetes-tests/tests/authentication/shared/custom_roles.py delete mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py rename docker/mongodb-kubernetes-tests/tests/multicluster/{shared => }/multi_cluster_reconcile_races.py (75%) create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/__init__.py create mode 100644 docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/multicluster_appdb_s3_based_backup_restore.py diff --git a/.evergreen-tasks.yml b/.evergreen-tasks.yml index fdd5f3fdd..c5aa0c1de 100644 --- a/.evergreen-tasks.yml +++ b/.evergreen-tasks.yml @@ -1190,7 +1190,7 @@ tasks: - func: e2e_test # this test is run, with an operator with race enabled - - name: e2e_mongodbmulticluster_om_reconcile_race_with_telemetry + - name: e2e_om_reconcile_race_with_telemetry tags: [ "patch-run" ] commands: - func: e2e_test diff --git a/.evergreen.yml b/.evergreen.yml index f8e14c49f..5203c4386 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -830,7 +830,7 @@ task_groups: <<: *setup_group_multi_cluster <<: *setup_and_teardown_task tasks: - - e2e_mongodbmulticluster_om_reconcile_race_with_telemetry + - e2e_om_reconcile_race_with_telemetry <<: *teardown_group # e2e_operator_task_group includes the tests for the specific Operator configuration/behavior. 
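[Editor's aside, not part of the patch] Patch 10 renames the reconcile-race task back to e2e_om_reconcile_race_with_telemetry, and the hunks above show why these renames are easy to get wrong: the task definition in .evergreen-tasks.yml and every reference to it in the task groups of .evergreen.yml must change in lockstep, and a dangling reference only surfaces when CI schedules the group. A small consistency check could catch that at commit time. The sketch below is illustrative only; it assumes task definitions live under a top-level tasks: key in .evergreen-tasks.yml and that groups list plain task names under task_groups[].tasks, and it ignores any tasks that .evergreen.yml may define itself.

    import yaml

    def defined_task_names(path: str) -> set:
        with open(path) as f:
            doc = yaml.safe_load(f)
        return {task["name"] for task in doc.get("tasks", [])}

    def referenced_task_names(path: str) -> set:
        with open(path) as f:
            doc = yaml.safe_load(f)
        refs = set()
        for group in doc.get("task_groups", []):
            refs.update(group.get("tasks", []))
        return refs

    if __name__ == "__main__":
        missing = referenced_task_names(".evergreen.yml") - defined_task_names(".evergreen-tasks.yml")
        if missing:
            raise SystemExit("task_groups reference undefined tasks: %s" % sorted(missing))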
They may deal with diff --git a/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py index ddafc0dae..371015395 100644 --- a/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py +++ b/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py @@ -10,72 +10,10 @@ from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_role import ClusterMongoDBRole, ClusterMongoDBRoleKind from pytest import fixture, mark +from tests.authentication.shared import custom_roles as testhelper from tests.multicluster.conftest import cluster_spec_list -# fmt: off -def get_expected_role(role_name: str) -> dict: - return { - "role": role_name, - "db": "admin", - "roles": [ - { - "db": "admin", - "role": "read" - } - ], - "privileges": [ - { - "resource": { - "db": "config", - "collection": "" - }, - "actions": [ - "find", - "update", - "insert", - "remove" - ] - }, - { - "resource": { - "db": "users", - "collection": "usersCollection" - }, - "actions": [ - "update", - "insert", - "remove" - ] - }, - { - "resource": { - "db": "", - "collection": "" - }, - "actions": [ - "find" - ] - }, - { - "resource": { - "cluster": True - }, - "actions": [ - "bypassWriteBlockingMode" - ] - } - ], - "authenticationRestrictions": [ - { - "clientSource": ["127.0.0.0/8"], - "serverAddress": ["10.0.0.0/8"] - } - ], - } -# fmt: on - - @fixture(scope="function") def first_project(namespace: str) -> str: cm = read_configmap(namespace=namespace, name="my-project") @@ -242,16 +180,13 @@ def test_create_resources( sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti, ): - mongodb_role_with_empty_strings.update() - mongodb_role_without_empty_strings.update() - - replica_set.update() - sharded_cluster.update() - mc_replica_set.update() - - replica_set.assert_reaches_phase(Phase.Running, timeout=400) - sharded_cluster.assert_reaches_phase(Phase.Running, timeout=400) - mc_replica_set.assert_reaches_phase(Phase.Running, timeout=400) + testhelper.test_create_resources( + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, + replica_set, + sharded_cluster, + mc_replica_set, + ) @mark.e2e_mongodbmulticluster_custom_roles @@ -262,10 +197,10 @@ def test_automation_config_has_roles( mongodb_role_with_empty_strings: ClusterMongoDBRole, mongodb_role_without_empty_strings: ClusterMongoDBRole, ): - assert_expected_roles( - mc_replica_set, + testhelper.test_automation_config_has_roles( replica_set, sharded_cluster, + mc_replica_set, mongodb_role_with_empty_strings, mongodb_role_without_empty_strings, ) @@ -278,31 +213,12 @@ def assert_expected_roles( mongodb_role_with_empty_strings: ClusterMongoDBRole, mongodb_role_without_empty_strings: ClusterMongoDBRole, ): - rs_tester = replica_set.get_automation_config_tester() - sc_tester = sharded_cluster.get_automation_config_tester() - mcrs_tester = mc_replica_set.get_automation_config_tester() - mcrs_tester.assert_has_expected_number_of_roles(expected_roles=2) - rs_tester.assert_has_expected_number_of_roles(expected_roles=2) - sc_tester.assert_has_expected_number_of_roles(expected_roles=2) - - rs_tester.assert_expected_role( - role_index=0, expected_value=get_expected_role(mongodb_role_with_empty_strings["spec"]["role"]) - ) - # the second role created without specifying fields with "" should result in identical role to the one with explicitly specified db: "", collection: "". 
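The hunk above is the heart of this refactor: each per-CRD test file keeps only its pytest markers and fixtures and forwards every test body to a module under shared/. A minimal sketch of the wrapper pattern, with hypothetical file, marker, and fixture names (MongoDB, Phase, and mark behave as in the surrounding code):

# tests/authentication/shared/example_checks.py - the shared module owns the logic
from kubetester.mongodb import MongoDB, Phase

def test_resource_reaches_running(resource: MongoDB):
    resource.update()
    resource.assert_reaches_phase(Phase.Running, timeout=400)

# tests/authentication/example_wrapper.py - the wrapper keeps only the marker
from pytest import mark
from tests.authentication.shared import example_checks as testhelper

@mark.e2e_example_suite
def test_resource_reaches_running(resource: MongoDB):
    testhelper.test_resource_reaches_running(resource)

One pitfall of the pattern is visible in the fixes earlier in this series: a wrapper that forwards to the wrong helper (e.g. test_create_kube_config_file instead of test_create_namespaces) still imports cleanly and only fails at runtime, so the forwarded name must match the wrapper's intent exactly.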
- rs_tester.assert_expected_role( - role_index=1, expected_value=get_expected_role(mongodb_role_without_empty_strings["spec"]["role"]) - ) - sc_tester.assert_expected_role( - role_index=0, expected_value=get_expected_role(mongodb_role_with_empty_strings["spec"]["role"]) - ) - sc_tester.assert_expected_role( - role_index=1, expected_value=get_expected_role(mongodb_role_without_empty_strings["spec"]["role"]) - ) - mcrs_tester.assert_expected_role( - role_index=0, expected_value=get_expected_role(mongodb_role_with_empty_strings["spec"]["role"]) - ) - mcrs_tester.assert_expected_role( - role_index=1, expected_value=get_expected_role(mongodb_role_without_empty_strings["spec"]["role"]) + testhelper.assert_expected_roles( + mc_replica_set, + replica_set, + sharded_cluster, + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, ) @@ -314,18 +230,13 @@ def test_change_inherited_role( mongodb_role_with_empty_strings: ClusterMongoDBRole, mongodb_role_without_empty_strings: ClusterMongoDBRole, ): - mongodb_role_with_empty_strings["spec"]["roles"][0]["role"] = "readWrite" - mongodb_role_with_empty_strings.update() - - def is_role_changed(ac_tester: AutomationConfigTester): - return ( - ac_tester.get_role_at_index(0)["roles"][0]["role"] == "readWrite" - and ac_tester.get_role_at_index(1)["roles"][0]["role"] == "read" - ) - - wait_until(lambda: is_role_changed(replica_set.get_automation_config_tester())) - wait_until(lambda: is_role_changed(sharded_cluster.get_automation_config_tester())) - wait_until(lambda: is_role_changed(mc_replica_set.get_automation_config_tester())) + testhelper.test_change_inherited_role( + replica_set, + sharded_cluster, + mc_replica_set, + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, + ) @mark.e2e_mongodbmulticluster_custom_roles @@ -335,55 +246,26 @@ def test_deleting_role_does_not_remove_access( mc_replica_set: MongoDBMulti, mongodb_role_with_empty_strings: ClusterMongoDBRole, ): - mongodb_role_with_empty_strings.delete() - - assert try_load(mongodb_role_with_empty_strings) == False - - replica_set.assert_reaches_phase( - phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role_with_empty_strings.get_name()}' not found" - ) - sharded_cluster.assert_reaches_phase( - phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role_with_empty_strings.get_name()}' not found" + testhelper.test_deleting_role_does_not_remove_access( + replica_set, sharded_cluster, mc_replica_set, mongodb_role_with_empty_strings ) - mc_replica_set.assert_reaches_phase( - phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role_with_empty_strings.get_name()}' not found" - ) - - # The role should still exist in the automation config - replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=2) - sharded_cluster.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=2) - mc_replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=2) @mark.e2e_mongodbmulticluster_custom_roles def test_removing_role_from_resources(replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti): - sharded_cluster["spec"]["security"]["roleRefs"] = None - sharded_cluster.update() - - mc_replica_set["spec"]["security"]["roleRefs"] = None - mc_replica_set.update() - - wait_until(lambda: len(sharded_cluster.get_automation_config_tester().automation_config["roles"]) == 0, timeout=120) - wait_until(lambda: 
len(mc_replica_set.get_automation_config_tester().automation_config["roles"]) == 0, timeout=120) + testhelper.test_removing_role_from_resources(replica_set, sharded_cluster, mc_replica_set) @mark.e2e_mongodbmulticluster_custom_roles def test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles): - multi_cluster_operator_no_cluster_mongodb_roles.assert_is_running() + testhelper.test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles) @mark.e2e_mongodbmulticluster_custom_roles def test_replicaset_is_failed(replica_set: MongoDB): - replica_set.assert_reaches_phase( - Phase.Failed, - msg_regexp="RoleRefs are not supported when ClusterMongoDBRoles are disabled. Please enable ClusterMongoDBRoles in the operator configuration.", - ) + testhelper.test_replicaset_is_failed(replica_set) @mark.e2e_mongodbmulticluster_custom_roles def test_replicaset_is_reconciled_without_rolerefs(replica_set: MongoDB): - replica_set["spec"]["security"]["roleRefs"] = None - replica_set.update() - - replica_set.assert_reaches_phase(Phase.Running) - replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=0) + testhelper.test_replicaset_is_reconciled_without_rolerefs(replica_set) diff --git a/docker/mongodb-kubernetes-tests/tests/authentication/shared/custom_roles.py b/docker/mongodb-kubernetes-tests/tests/authentication/shared/custom_roles.py new file mode 100644 index 000000000..52b3980ac --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/authentication/shared/custom_roles.py @@ -0,0 +1,225 @@ +from kubetester import ( + create_or_update_configmap, + find_fixture, + read_configmap, + try_load, + wait_until, +) +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.mongodb import MongoDB, Phase +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_role import ClusterMongoDBRole, ClusterMongoDBRoleKind +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + + +# fmt: off +def get_expected_role(role_name: str) -> dict: + return { + "role": role_name, + "db": "admin", + "roles": [ + { + "db": "admin", + "role": "read" + } + ], + "privileges": [ + { + "resource": { + "db": "config", + "collection": "" + }, + "actions": [ + "find", + "update", + "insert", + "remove" + ] + }, + { + "resource": { + "db": "users", + "collection": "usersCollection" + }, + "actions": [ + "update", + "insert", + "remove" + ] + }, + { + "resource": { + "db": "", + "collection": "" + }, + "actions": [ + "find" + ] + }, + { + "resource": { + "cluster": True + }, + "actions": [ + "bypassWriteBlockingMode" + ] + } + ], + "authenticationRestrictions": [ + { + "clientSource": ["127.0.0.0/8"], + "serverAddress": ["10.0.0.0/8"] + } + ], + } +# fmt: on + + +def test_create_resources( + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti | MongoDB, +): + mongodb_role_with_empty_strings.update() + mongodb_role_without_empty_strings.update() + + replica_set.update() + sharded_cluster.update() + mc_replica_set.update() + + replica_set.assert_reaches_phase(Phase.Running, timeout=400) + sharded_cluster.assert_reaches_phase(Phase.Running, timeout=400) + mc_replica_set.assert_reaches_phase(Phase.Running, timeout=400) + + +def test_automation_config_has_roles( + replica_set: MongoDB, + 
sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti | MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, +): + assert_expected_roles( + mc_replica_set, + replica_set, + sharded_cluster, + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, + ) + + +def assert_expected_roles( + mc_replica_set: MongoDBMulti | MongoDB, + replica_set: MongoDB, + sharded_cluster: MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, +): + rs_tester = replica_set.get_automation_config_tester() + sc_tester = sharded_cluster.get_automation_config_tester() + mcrs_tester = mc_replica_set.get_automation_config_tester() + mcrs_tester.assert_has_expected_number_of_roles(expected_roles=2) + rs_tester.assert_has_expected_number_of_roles(expected_roles=2) + sc_tester.assert_has_expected_number_of_roles(expected_roles=2) + + rs_tester.assert_expected_role( + role_index=0, expected_value=get_expected_role(mongodb_role_with_empty_strings["spec"]["role"]) + ) + # the second role created without specifying fields with "" should result in identical role to the one with explicitly specified db: "", collection: "". + rs_tester.assert_expected_role( + role_index=1, expected_value=get_expected_role(mongodb_role_without_empty_strings["spec"]["role"]) + ) + sc_tester.assert_expected_role( + role_index=0, expected_value=get_expected_role(mongodb_role_with_empty_strings["spec"]["role"]) + ) + sc_tester.assert_expected_role( + role_index=1, expected_value=get_expected_role(mongodb_role_without_empty_strings["spec"]["role"]) + ) + mcrs_tester.assert_expected_role( + role_index=0, expected_value=get_expected_role(mongodb_role_with_empty_strings["spec"]["role"]) + ) + mcrs_tester.assert_expected_role( + role_index=1, expected_value=get_expected_role(mongodb_role_without_empty_strings["spec"]["role"]) + ) + + +def test_change_inherited_role( + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti | MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, +): + mongodb_role_with_empty_strings["spec"]["roles"][0]["role"] = "readWrite" + mongodb_role_with_empty_strings.update() + + def is_role_changed(ac_tester: AutomationConfigTester): + return ( + ac_tester.get_role_at_index(0)["roles"][0]["role"] == "readWrite" + and ac_tester.get_role_at_index(1)["roles"][0]["role"] == "read" + ) + + wait_until(lambda: is_role_changed(replica_set.get_automation_config_tester())) + wait_until(lambda: is_role_changed(sharded_cluster.get_automation_config_tester())) + wait_until(lambda: is_role_changed(mc_replica_set.get_automation_config_tester())) + + +def test_deleting_role_does_not_remove_access( + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti | MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, +): + mongodb_role_with_empty_strings.delete() + + assert try_load(mongodb_role_with_empty_strings) == False + + replica_set.assert_reaches_phase( + phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role_with_empty_strings.get_name()}' not found" + ) + sharded_cluster.assert_reaches_phase( + phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role_with_empty_strings.get_name()}' not found" + ) + mc_replica_set.assert_reaches_phase( + phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role_with_empty_strings.get_name()}' not found" + ) 
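test_change_inherited_role above captures the polling idiom these shared helpers rely on: mutate the ClusterMongoDBRole, then wait until the change is observable in each deployment's automation config rather than asserting immediately. A condensed sketch of the same idiom, using the wait_until and AutomationConfigTester calls already imported by this module (the 120-second timeout is illustrative):

from kubetester import wait_until
from kubetester.automation_config_tester import AutomationConfigTester

def inherited_role_updated(ac_tester: AutomationConfigTester) -> bool:
    # True once role 0 inherits readWrite while role 1 still inherits read.
    roles = [ac_tester.get_role_at_index(i)["roles"][0]["role"] for i in (0, 1)]
    return roles == ["readWrite", "read"]

def wait_for_role_change(deployment) -> None:
    # Re-evaluate the predicate until it holds or the timeout expires;
    # `deployment` is any resource exposing get_automation_config_tester().
    wait_until(lambda: inherited_role_updated(deployment.get_automation_config_tester()), timeout=120)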
+ + # The role should still exist in the automation config + replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=2) + sharded_cluster.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=2) + mc_replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=2) + + +def test_removing_role_from_resources( + replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti | MongoDB +): + sharded_cluster["spec"]["security"]["roleRefs"] = None + sharded_cluster.update() + + mc_replica_set["spec"]["security"]["roleRefs"] = None + mc_replica_set.update() + + wait_until(lambda: len(sharded_cluster.get_automation_config_tester().automation_config["roles"]) == 0, timeout=120) + wait_until(lambda: len(mc_replica_set.get_automation_config_tester().automation_config["roles"]) == 0, timeout=120) + + +def test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles): + multi_cluster_operator_no_cluster_mongodb_roles.assert_is_running() + + +def test_replicaset_is_failed(replica_set: MongoDB): + replica_set.assert_reaches_phase( + Phase.Failed, + msg_regexp="RoleRefs are not supported when ClusterMongoDBRoles are disabled. Please enable ClusterMongoDBRoles in the operator configuration.", + ) + + +def test_replicaset_is_reconciled_without_rolerefs(replica_set: MongoDB): + replica_set["spec"]["security"]["roleRefs"] = None + replica_set.update() + + replica_set.assert_reaches_phase(Phase.Running) + replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=0) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py index 3eadc4b15..3d7ce4bf6 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py @@ -236,20 +236,20 @@ def test_create_om( self, ops_manager: MongoDBOpsManager, ): - testhelper.TestOpsManagerCreation.test_create_om(ops_manager) + testhelper.TestOpsManagerCreation.test_create_om(self, ops_manager) def test_daemon_statefulset( self, ops_manager: MongoDBOpsManager, ): - testhelper.TestOpsManagerCreation.test_daemon_statefulset(ops_manager) + testhelper.TestOpsManagerCreation.test_daemon_statefulset(self, ops_manager) def test_backup_daemon_services_created( self, namespace, central_cluster_client: kubernetes.client.ApiClient, ): - testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(namespace, central_cluster_client) + testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(self, namespace, central_cluster_client) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore @@ -262,16 +262,16 @@ def test_backup_mdbs_created( oplog_replica_set: MongoDB, blockstore_replica_set: MongoDB, ): - testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(oplog_replica_set, blockstore_replica_set) + testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(self, oplog_replica_set, blockstore_replica_set) def test_oplog_user_created(self, oplog_user: MongoDBUser): - testhelper.TestBackupDatabasesAdded.test_oplog_user_created(oplog_user) + 
testhelper.TestBackupDatabasesAdded.test_oplog_user_created(self, oplog_user) def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): - testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(ops_manager) + testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(self, ops_manager) def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): - testhelper.TestBackupDatabasesAdded.test_fix_om(ops_manager, oplog_user) + testhelper.TestBackupDatabasesAdded.test_fix_om(self, ops_manager, oplog_user) class TestBackupForMongodb: @@ -373,31 +373,31 @@ def test_setup_om_connection( member_cluster_clients: List[MultiClusterClient], ): testhelper.TestBackupForMongodb.test_setup_om_connection( - ops_manager, central_cluster_client, member_cluster_clients + self, ops_manager, central_cluster_client, member_cluster_clients ) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): - testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(mongodb_multi_one) + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(self, mongodb_multi_one) @skip_if_local @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore @pytest.mark.flaky(reruns=100, reruns_delay=6) def test_add_test_data(self, mongodb_multi_one_collection): - testhelper.TestBackupForMongodb.test_add_test_data(mongodb_multi_one_collection) + testhelper.TestBackupForMongodb.test_add_test_data(self, mongodb_multi_one_collection) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_mdb_backed_up(self, project_one: OMTester): - testhelper.TestBackupForMongodb.test_mdb_backed_up(project_one) + testhelper.TestBackupForMongodb.test_mdb_backed_up(self, project_one) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_change_mdb_data(self, mongodb_multi_one_collection): - testhelper.TestBackupForMongodb.test_change_mdb_data(mongodb_multi_one_collection) + testhelper.TestBackupForMongodb.test_change_mdb_data(self, mongodb_multi_one_collection) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_pit_restore(self, project_one: OMTester): - testhelper.TestBackupForMongodb.test_pit_restore(project_one) + testhelper.TestBackupForMongodb.test_pit_restore(self, project_one) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_data_got_restored(self, mongodb_multi_one_collection, mdb_client): - testhelper.TestBackupForMongodb.test_data_got_restored(mongodb_multi_one_collection, mdb_client) + testhelper.TestBackupForMongodb.test_data_got_restored(self, mongodb_multi_one_collection, mdb_client) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py index 45c74a11c..2f34a04e5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py @@ -282,20 +282,20 @@ def test_create_om( self, ops_manager: MongoDBOpsManager, ): - testhelper.TestOpsManagerCreation.test_create_om(ops_manager) + testhelper.TestOpsManagerCreation.test_create_om(self, ops_manager) def test_daemon_statefulset( self, ops_manager: 
MongoDBOpsManager, ): - testhelper.TestOpsManagerCreation.test_daemon_statefulset(ops_manager) + testhelper.TestOpsManagerCreation.test_daemon_statefulset(self, ops_manager) def test_backup_daemon_services_created( self, namespace, central_cluster_client: kubernetes.client.ApiClient, ): - testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(namespace, central_cluster_client) + testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(self, namespace, central_cluster_client) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh @@ -305,16 +305,16 @@ def test_backup_mdbs_created( oplog_replica_set: MongoDB, blockstore_replica_set: MongoDB, ): - testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(oplog_replica_set, blockstore_replica_set) + testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(self, oplog_replica_set, blockstore_replica_set) def test_oplog_user_created(self, oplog_user: MongoDBUser): - testhelper.TestBackupDatabasesAdded.test_oplog_user_created(oplog_user) + testhelper.TestBackupDatabasesAdded.test_oplog_user_created(self, oplog_user) def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): - testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(ops_manager) + testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(self, ops_manager) def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): - testhelper.TestBackupDatabasesAdded.test_fix_om(ops_manager, oplog_user) + testhelper.TestBackupDatabasesAdded.test_fix_om(self, ops_manager, oplog_user) class TestBackupForMongodb: @@ -493,34 +493,34 @@ def test_setup_om_connection( member_cluster_clients: List[MultiClusterClient], ): testhelper.TestBackupForMongodb.test_setup_om_connection( - replica_set_external_hosts, ops_manager, central_cluster_client, member_cluster_clients + self, replica_set_external_hosts, ops_manager, central_cluster_client, member_cluster_clients ) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): - testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(mongodb_multi_one) + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(self, mongodb_multi_one) @skip_if_local @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_add_test_data(self, mongodb_multi_one_collection): - testhelper.TestBackupForMongodb.test_add_test_data(mongodb_multi_one_collection) + testhelper.TestBackupForMongodb.test_add_test_data(self, mongodb_multi_one_collection) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mdb_backed_up(self, project_one: OMTester): - testhelper.TestBackupForMongodb.test_mdb_backed_up(project_one) + testhelper.TestBackupForMongodb.test_mdb_backed_up(self, project_one) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_change_mdb_data(self, mongodb_multi_one_collection): - testhelper.TestBackupForMongodb.test_change_mdb_data(mongodb_multi_one_collection) + testhelper.TestBackupForMongodb.test_change_mdb_data(self, mongodb_multi_one_collection) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_pit_restore(self, project_one: OMTester): - testhelper.TestBackupForMongodb.test_pit_restore(project_one) + testhelper.TestBackupForMongodb.test_pit_restore(self, project_one) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mdb_ready(self, 
mongodb_multi_one: MongoDBMulti): - testhelper.TestBackupForMongodb.test_mdb_ready(mongodb_multi_one) + testhelper.TestBackupForMongodb.test_mdb_ready(self, mongodb_multi_one) @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_data_got_restored(self, mongodb_multi_one_collection): - testhelper.TestBackupForMongodb.test_data_got_restored(mongodb_multi_one_collection) + testhelper.TestBackupForMongodb.test_data_got_restored(self, mongodb_multi_one_collection) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py index 41978b1d3..ccaae8106 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py @@ -42,13 +42,13 @@ def mongodb_multi( @pytest.mark.e2e_mongodbmulticluster_multi_cluster_oidc_m2m_group class TestOIDCMultiCluster(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): - testhelper.test_deploy_operator(multi_cluster_operator) + testhelper.TestOIDCMultiCluster.test_deploy_operator(self, multi_cluster_operator) def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti): - testhelper.test_create_oidc_replica_set(mongodb_multi) + testhelper.TestOIDCMultiCluster.test_create_oidc_replica_set(self, mongodb_multi) def test_assert_connectivity(self, mongodb_multi: MongoDBMulti): - testhelper.test_assert_connectivity(mongodb_multi) + testhelper.TestOIDCMultiCluster.test_assert_connectivity(self, mongodb_multi) def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti): - testhelper.test_ops_manager_state_updated_correctly(mongodb_multi) + testhelper.TestOIDCMultiCluster.test_ops_manager_state_updated_correctly(self, mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py index ad41749d0..c5fc0b9bd 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py @@ -53,16 +53,16 @@ def oidc_user(namespace) -> MongoDBUser: @pytest.mark.e2e_mongodbmulticluster_multi_cluster_oidc_m2m_user class TestOIDCMultiCluster(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): - testhelper.test_deploy_operator(multi_cluster_operator) + testhelper.TestOIDCMultiCluster.test_deploy_operator(self, multi_cluster_operator) def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti): - testhelper.test_create_oidc_replica_set(mongodb_multi) + testhelper.TestOIDCMultiCluster.test_create_oidc_replica_set(self, mongodb_multi) def test_create_user(self, oidc_user: MongoDBUser): - testhelper.test_create_user(oidc_user) + testhelper.TestOIDCMultiCluster.test_create_user(self, oidc_user) def test_assert_connectivity(self, mongodb_multi: MongoDBMulti): - testhelper.test_assert_connectivity(mongodb_multi) + testhelper.TestOIDCMultiCluster.test_assert_connectivity(self, mongodb_multi) 
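The recurring `self` additions in these wrappers are the actual fix being applied: the shared suites are now classes, and accessing a method through the class object (testhelper.TestOIDCMultiCluster.test_assert_connectivity) yields a plain function, so the wrapper has to supply an instance explicitly. A self-contained illustration with hypothetical names:

class SharedSuite:
    def check_value(self, value: int):
        assert value > 0

class WrapperSuite:
    def test_value(self):
        # Calling through the class gives an unbound function, so the wrapper
        # passes itself as `self`. SharedSuite.check_value(1) would instead
        # bind 1 to `self`, leave `value` missing, and raise TypeError.
        SharedSuite.check_value(self, 1)

WrapperSuite().test_value()  # runs the shared assertion against value=1

Because the shared methods never read wrapper-specific state, any instance satisfies the parameter, which is why passing the wrapper's own self is sufficient.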
def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti): - testhelper.test_ops_manager_state_updated_correctly(mongodb_multi) + testhelper.TestOIDCMultiCluster.test_ops_manager_state_updated_correctly(self, mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py deleted file mode 100644 index b8e81317e..000000000 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_reconcile_races.py +++ /dev/null @@ -1,109 +0,0 @@ -# It's intended to check for reconcile data races. -from typing import Optional - -import kubernetes.client -import pytest -from kubetester import find_fixture, try_load -from kubetester.operator import Operator -from kubetester.opsmanager import MongoDBOpsManager - -from ..shared import multi_cluster_reconcile_races as testhelper - - -@pytest.fixture(scope="module") -def ops_manager( - namespace: str, - custom_version: Optional[str], - custom_appdb_version: str, - central_cluster_client: kubernetes.client.ApiClient, -) -> MongoDBOpsManager: - resource = MongoDBOpsManager.from_yaml(find_fixture("om_validation.yaml"), namespace=namespace, name="om") - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.set_version(custom_version) - resource.set_appdb_version(custom_appdb_version) - - try_load(resource) - return resource - - -@pytest.fixture(scope="module") -def ops_manager2( - namespace: str, - custom_version: Optional[str], - custom_appdb_version: str, - central_cluster_client: kubernetes.client.ApiClient, -) -> MongoDBOpsManager: - resource = MongoDBOpsManager.from_yaml(find_fixture("om_validation.yaml"), namespace=namespace, name="om2") - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.set_version(custom_version) - resource.set_appdb_version(custom_appdb_version) - - try_load(resource) - return resource - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_deploy_operator(multi_cluster_operator: Operator): - testhelper.test_deploy_operator(multi_cluster_operator) - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_create_om(ops_manager: MongoDBOpsManager, ops_manager2: MongoDBOpsManager): - testhelper.test_create_om(ops_manager, ops_manager2) - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_om_ready(ops_manager: MongoDBOpsManager): - testhelper.test_om_ready(ops_manager) - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_om2_ready(ops_manager2: MongoDBOpsManager): - testhelper.test_om2_ready(ops_manager2) - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_create_mdb(ops_manager: MongoDBOpsManager, namespace: str): - testhelper.test_create_mdb(ops_manager, namespace) - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_create_mdbmc(ops_manager: MongoDBOpsManager, namespace: str): - testhelper.test_create_mdbmc(ops_manager, "mongodbmulticluster", namespace) - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_create_sharded(ops_manager: MongoDBOpsManager, namespace: str): - testhelper.test_create_sharded(ops_manager, namespace) - - 
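The deletion above also removes the wrapper's module-scoped ops_manager fixtures; as the rename diff further down shows, they move unchanged into the shared module, which now doubles as the test module. The pattern the fixtures use is worth calling out: try_load hydrates the object from the cluster if the custom resource already exists, making the fixture safe to re-enter. A reduced sketch built from the same calls that appear in the diff (version handling omitted):

import kubernetes.client
import pytest
from kubetester import find_fixture, try_load
from kubetester.opsmanager import MongoDBOpsManager

@pytest.fixture(scope="module")
def ops_manager(namespace: str, central_cluster_client: kubernetes.client.ApiClient) -> MongoDBOpsManager:
    resource = MongoDBOpsManager.from_yaml(find_fixture("om_validation.yaml"), namespace=namespace, name="om")
    resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client)
    try_load(resource)  # reuse live state when the CR is already deployed
    return resource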
-@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_create_standalone(ops_manager: MongoDBOpsManager, namespace: str): - testhelper.test_create_standalone(ops_manager, namespace) - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_create_users(ops_manager: MongoDBOpsManager, namespace: str): - testhelper.test_create_users(ops_manager, namespace) - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_pod_logs_race(multi_cluster_operator: Operator): - testhelper.test_pod_logs_race(multi_cluster_operator) - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_restart_operator_pod(ops_manager: MongoDBOpsManager, namespace: str, multi_cluster_operator: Operator): - testhelper.test_restart_operator_pod(ops_manager, namespace, multi_cluster_operator) - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_pod_logs_race_after_restart(multi_cluster_operator: Operator): - testhelper.test_pod_logs_race_after_restart(multi_cluster_operator) - - -@pytest.mark.e2e_mongodbmulticluster_om_reconcile_race_with_telemetry -def test_telemetry_configmap(namespace: str): - testhelper.test_telemetry_configmap(namespace) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py index c9d5b4e61..b5146c652 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py @@ -153,8 +153,10 @@ def test_scale_up_first_cluster( self, mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] ): testhelper.TestNonSequentialMemberIdsInReplicaSet.test_scale_up_first_cluster( - mongodb_multi, member_cluster_clients + self, mongodb_multi, member_cluster_clients ) def test_change_project(self, mongodb_multi: MongoDBMulti, new_project_configmap: str): - testhelper.TestNonSequentialMemberIdsInReplicaSet.test_change_project(mongodb_multi, new_project_configmap) + testhelper.TestNonSequentialMemberIdsInReplicaSet.test_change_project( + self, mongodb_multi, new_project_configmap + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py index be0fbd6cd..bba02ec46 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py @@ -1,11 +1,8 @@ import kubernetes import pytest -import yaml from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture from kubetester.operator import Operator - -from ..shared import multi_cluster_validation as testhelper +from tests.multicluster.shared import multi_cluster_validation as testhelper MDBM_RESOURCE = "mongodbmulticluster-multi-cluster.yaml" @@ -13,13 +10,13 @@ @pytest.mark.e2e_mongodbmulticluster_multi_cluster_validation class TestWebhookValidation(KubernetesTester): def 
test_deploy_operator(self, multi_cluster_operator: Operator): - testhelper.TestWebhookValidation.test_deploy_operator(multi_cluster_operator) + testhelper.TestWebhookValidation.test_deploy_operator(self, multi_cluster_operator) def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient): - testhelper.TestWebhookValidation.test_unique_cluster_names(central_cluster_client, MDBM_RESOURCE) + testhelper.TestWebhookValidation.test_unique_cluster_names(self, central_cluster_client, MDBM_RESOURCE) def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient): - testhelper.TestWebhookValidation.test_only_one_schema(central_cluster_client, MDBM_RESOURCE) + testhelper.TestWebhookValidation.test_only_one_schema(self, central_cluster_client, MDBM_RESOURCE) def test_non_empty_clusterspec_list(self, central_cluster_client: kubernetes.client.ApiClient): - testhelper.TestWebhookValidation.test_non_empty_clusterspec_list(central_cluster_client, MDBM_RESOURCE) + testhelper.TestWebhookValidation.test_non_empty_clusterspec_list(self, central_cluster_client, MDBM_RESOURCE) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_reconcile_races.py b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py similarity index 75% rename from docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_reconcile_races.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py index 476288758..d605ccf82 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_reconcile_races.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py @@ -1,9 +1,11 @@ # It's intended to check for reconcile data races. 
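With the wrapper file gone, the renamed module becomes the test module itself, which is why every shared test below now carries its own @pytest.mark.e2e_om_reconcile_race_with_telemetry marker. The race checks in it scan the operator pod's logs and assert that `contains_race` stayed false; the log helper is elided in this hunk, but since the Evergreen comment notes the operator runs with race detection enabled, the scan presumably amounts to something like the following sketch (the banner string is the standard Go race-detector prefix; the helper names are hypothetical):

def logs_contain_race(pod_logs: str) -> bool:
    # A binary built with `go build -race` prints this banner when the
    # detector fires; the tests assert it never appears.
    return "WARNING: DATA RACE" in pod_logs

def assert_no_race(pod_logs: str) -> None:
    assert not logs_contain_race(pod_logs)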
import json import time +from typing import Optional +import kubernetes.client import pytest -from kubetester import create_or_update_secret, try_load +from kubetester import create_or_update_secret, find_fixture, try_load from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb import MongoDB @@ -21,6 +23,40 @@ from tests.multicluster.conftest import cluster_spec_list +@pytest.fixture(scope="module") +def ops_manager( + namespace: str, + custom_version: Optional[str], + custom_appdb_version: str, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBOpsManager: + resource = MongoDBOpsManager.from_yaml(find_fixture("om_validation.yaml"), namespace=namespace, name="om") + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.set_version(custom_version) + resource.set_appdb_version(custom_appdb_version) + + try_load(resource) + return resource + + +@pytest.fixture(scope="module") +def ops_manager2( + namespace: str, + custom_version: Optional[str], + custom_appdb_version: str, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBOpsManager: + resource = MongoDBOpsManager.from_yaml(find_fixture("om_validation.yaml"), namespace=namespace, name="om2") + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.set_version(custom_version) + resource.set_appdb_version(custom_appdb_version) + + try_load(resource) + return resource + + def get_replica_set(ops_manager, namespace: str, idx: int) -> MongoDB: name = f"mdb-{idx}-rs" resource = MongoDB.from_yaml( @@ -34,21 +70,14 @@ def get_replica_set(ops_manager, namespace: str, idx: int) -> MongoDB: return resource -def get_mdbmc(ops_manager, type: str, namespace: str, idx: int) -> MongoDBMulti | MongoDB: +def get_mdbmc(ops_manager, namespace: str, idx: int) -> MongoDBMulti | MongoDB: name = f"mdb-{idx}-mc" - resourceName = f"{type}-multi-cluster.yaml" - if type == "mongodb": - resource = MongoDB.from_yaml( - yaml_fixture(resourceName), - namespace=namespace, - name=name, - ).configure(ops_manager, name, api_client=get_central_cluster_client()) - else: - resource = MongoDBMulti.from_yaml( - yaml_fixture(resourceName), - namespace=namespace, - name=name, - ).configure(ops_manager, name, api_client=get_central_cluster_client()) + resourceName = f"mongodbmulticluster-multi-cluster.yaml" + resource = MongoDBMulti.from_yaml( + yaml_fixture(resourceName), + namespace=namespace, + name=name, + ).configure(ops_manager, name, api_client=get_central_cluster_client()) try_load(resource) return resource @@ -94,8 +123,8 @@ def get_all_rs(ops_manager, namespace) -> list[MongoDB]: return [get_replica_set(ops_manager, namespace, idx) for idx in range(0, 5)] -def get_all_mdbmc(ops_manager, type, namespace) -> list[MongoDB]: - return [get_mdbmc(ops_manager, type, namespace, idx) for idx in range(0, 4)] +def get_all_mdbmc(ops_manager, namespace) -> list[MongoDB]: + return [get_mdbmc(ops_manager, namespace, idx) for idx in range(0, 4)] def get_all_standalone(ops_manager, namespace) -> list[MongoDB]: @@ -106,25 +135,30 @@ def get_all_users(ops_manager, namespace, mdb: MongoDB) -> list[MongoDBUser]: return [get_user(ops_manager, namespace, idx, mdb) for idx in range(0, 2)] +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_create_om(ops_manager: 
MongoDBOpsManager, ops_manager2: MongoDBOpsManager): ops_manager.update() ops_manager2.update() +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_om_ready(ops_manager: MongoDBOpsManager): ops_manager.appdb_status().assert_reaches_phase(Phase.Running, timeout=1800) ops_manager.om_status().assert_reaches_phase(Phase.Running, timeout=1800) +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_om2_ready(ops_manager2: MongoDBOpsManager): ops_manager2.appdb_status().assert_reaches_phase(Phase.Running, timeout=1800) ops_manager2.om_status().assert_reaches_phase(Phase.Running, timeout=1800) +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_create_mdb(ops_manager: MongoDBOpsManager, namespace: str): for resource in get_all_rs(ops_manager, namespace): resource["spec"]["security"] = { @@ -137,8 +171,9 @@ def test_create_mdb(ops_manager: MongoDBOpsManager, namespace: str): r.assert_reaches_phase(Phase.Running) -def test_create_mdbmc(ops_manager: MongoDBOpsManager, type: str, namespace: str): - for resource in get_all_mdbmc(ops_manager, type, namespace): +@pytest.mark.e2e_om_reconcile_race_with_telemetry +def test_create_mdbmc(ops_manager: MongoDBOpsManager, namespace: str): + for resource in get_all_mdbmc(ops_manager, namespace): resource.set_version(get_custom_mdb_version()) resource["spec"]["clusterSpecList"] = cluster_spec_list(get_member_cluster_names(), [1, 1, 1]) resource.update() @@ -147,6 +182,7 @@ def test_create_mdbmc(ops_manager: MongoDBOpsManager, type: str, namespace: str) r.assert_reaches_phase(Phase.Running) +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_create_sharded(ops_manager: MongoDBOpsManager, namespace: str): for resource in get_all_sharded(ops_manager, namespace): resource.set_version(get_custom_mdb_version()) @@ -156,6 +192,7 @@ def test_create_sharded(ops_manager: MongoDBOpsManager, namespace: str): r.assert_reaches_phase(Phase.Running) +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_create_standalone(ops_manager: MongoDBOpsManager, namespace: str): for resource in get_all_standalone(ops_manager, namespace): resource.set_version(get_custom_mdb_version()) @@ -165,6 +202,7 @@ def test_create_standalone(ops_manager: MongoDBOpsManager, namespace: str): r.assert_reaches_phase(Phase.Running) +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_create_users(ops_manager: MongoDBOpsManager, namespace: str): create_or_update_secret( namespace, @@ -181,6 +219,7 @@ def test_create_users(ops_manager: MongoDBOpsManager, namespace: str): r.assert_reaches_phase(Phase.Running) +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_pod_logs_race(multi_cluster_operator: Operator): pods = multi_cluster_operator.list_operator_pods() pod_name = pods[0].metadata.name @@ -192,6 +231,7 @@ def test_pod_logs_race(multi_cluster_operator: Operator): assert not contains_race +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_restart_operator_pod(ops_manager: MongoDBOpsManager, namespace: str, multi_cluster_operator: Operator): # this enforces a requeue of all existing resources, increasing the chances of races to happen multi_cluster_operator.restart_operator_deployment() @@ -201,6 +241,7 @@ def test_restart_operator_pod(ops_manager: MongoDBOpsManager, namespace: str, mu r.assert_reaches_phase(Phase.Running) +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_pod_logs_race_after_restart(multi_cluster_operator: Operator): pods = multi_cluster_operator.list_operator_pods() pod_name = pods[0].metadata.name @@ -212,6 +253,7 @@ def 
test_pod_logs_race_after_restart(multi_cluster_operator: Operator): assert not contains_race +@pytest.mark.e2e_om_reconcile_race_with_telemetry def test_telemetry_configmap(namespace: str): config = KubernetesTester.read_configmap(namespace, TELEMETRY_CONFIGMAP_NAME) for ts_key in ["lastSendTimestampClusters", "lastSendTimestampDeployments", "lastSendTimestampOperators"]: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py index f1eec9b20..1f917a092 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py @@ -104,6 +104,7 @@ class TestBackupDatabasesAdded: running state""" def test_backup_mdbs_created( + self, oplog_replica_set: MongoDB, blockstore_replica_set: MongoDB, ): @@ -111,10 +112,10 @@ def test_backup_mdbs_created( oplog_replica_set.assert_reaches_phase(Phase.Running) blockstore_replica_set.assert_reaches_phase(Phase.Running) - def test_oplog_user_created(oplog_user: MongoDBUser): + def test_oplog_user_created(self, oplog_user: MongoDBUser): oplog_user.assert_reaches_phase(Phase.Updated) - def test_om_failed_oplog_no_user_ref(ops_manager: MongoDBOpsManager): + def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" ops_manager.backup_status().assert_reaches_phase( Phase.Failed, @@ -122,7 +123,7 @@ def test_om_failed_oplog_no_user_ref(ops_manager: MongoDBOpsManager): "must be specified using 'mongodbUserRef'", ) - def test_fix_om(ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): + def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): ops_manager.load() ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} ops_manager.update() @@ -138,6 +139,7 @@ def test_fix_om(ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): class TestBackupForMongodb: def test_setup_om_connection( + self, ops_manager: MongoDBOpsManager, central_cluster_client: kubernetes.client.ApiClient, member_cluster_clients: List[MultiClusterClient], @@ -174,23 +176,23 @@ def test_setup_om_connection( ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address ops_manager.update() - def test_mongodb_multi_one_running_state(mongodb_multi_one: MongoDBMulti | MongoDB): + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti | MongoDB): # we might fail connection in the beginning since we set a custom dns in coredns mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1200) - def test_add_test_data(mongodb_multi_one_collection): + def test_add_test_data(self, mongodb_multi_one_collection): mongodb_multi_one_collection.insert_one(TEST_DATA) - def test_mdb_backed_up(project_one: OMTester): + def test_mdb_backed_up(self, project_one: OMTester): project_one.wait_until_backup_snapshots_are_ready(expected_count=1) - def test_change_mdb_data(mongodb_multi_one_collection): + def test_change_mdb_data(self, mongodb_multi_one_collection): now_millis = time_to_millis(datetime.datetime.now()) print("\nCurrent time (millis): {}".format(now_millis)) time.sleep(30) mongodb_multi_one_collection.insert_one({"foo": "bar"}) - def test_pit_restore(project_one: OMTester): + def test_pit_restore(self, project_one: 
OMTester): now_millis = time_to_millis(datetime.datetime.now()) print("\nCurrent time (millis): {}".format(now_millis)) @@ -203,7 +205,7 @@ def test_pit_restore(project_one: OMTester): project_one.create_restore_job_pit(pit_millis) - def test_data_got_restored(mongodb_multi_one_collection, mdb_client): + def test_data_got_restored(self, mongodb_multi_one_collection, mdb_client): assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py index 2892fcf56..68bbaa38d 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py @@ -81,6 +81,7 @@ class TestOpsManagerCreation: """ def test_create_om( + self, ops_manager: MongoDBOpsManager, ): ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() @@ -95,6 +96,7 @@ def test_create_om( ) def test_daemon_statefulset( + self, ops_manager: MongoDBOpsManager, ): def stateful_set_becomes_ready(): @@ -110,6 +112,7 @@ def stateful_set_becomes_ready(): ) def test_backup_daemon_services_created( + self, namespace, central_cluster_client: kubernetes.client.ApiClient, ): @@ -126,6 +129,7 @@ class TestBackupDatabasesAdded: running state""" def test_backup_mdbs_created( + self, oplog_replica_set: MongoDB, blockstore_replica_set: MongoDB, ): @@ -133,10 +137,10 @@ def test_backup_mdbs_created( oplog_replica_set.assert_reaches_phase(Phase.Running) blockstore_replica_set.assert_reaches_phase(Phase.Running) - def test_oplog_user_created(oplog_user: MongoDBUser): + def test_oplog_user_created(self, oplog_user: MongoDBUser): oplog_user.assert_reaches_phase(Phase.Updated) - def test_om_failed_oplog_no_user_ref(ops_manager: MongoDBOpsManager): + def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" ops_manager.backup_status().assert_reaches_phase( Phase.Failed, @@ -144,7 +148,7 @@ def test_om_failed_oplog_no_user_ref(ops_manager: MongoDBOpsManager): "must be specified using 'mongodbUserRef'", ) - def test_fix_om(ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): + def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): ops_manager.load() ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} ops_manager.update() @@ -161,6 +165,7 @@ def test_fix_om(ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): class TestBackupForMongodb: def test_setup_om_connection( + self, replica_set_external_hosts: List[Tuple[str, str]], ops_manager: MongoDBOpsManager, central_cluster_client: kubernetes.client.ApiClient, @@ -201,11 +206,11 @@ def test_setup_om_connection( ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address ops_manager.update() - def test_mongodb_multi_one_running_state(mongodb_multi_one: MongoDBMulti | MongoDB): + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti | MongoDB): # we might fail connection in the beginning since we set a custom dns in coredns mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1500) - def test_add_test_data(mongodb_multi_one_collection): + def test_add_test_data(self, 
mongodb_multi_one_collection): max_attempts = 100 while max_attempts > 0: try: @@ -216,16 +221,16 @@ def test_add_test_data(mongodb_multi_one_collection): max_attempts -= 1 time.sleep(6) - def test_mdb_backed_up(project_one: OMTester): + def test_mdb_backed_up(self, project_one: OMTester): project_one.wait_until_backup_snapshots_are_ready(expected_count=1) - def test_change_mdb_data(mongodb_multi_one_collection): + def test_change_mdb_data(self, mongodb_multi_one_collection): now_millis = time_to_millis(datetime.datetime.now()) print("\nCurrent time (millis): {}".format(now_millis)) time.sleep(30) mongodb_multi_one_collection.insert_one({"foo": "bar"}) - def test_pit_restore(project_one: OMTester): + def test_pit_restore(self, project_one: OMTester): now_millis = time_to_millis(datetime.datetime.now()) print("\nCurrent time (millis): {}".format(now_millis)) @@ -235,14 +240,14 @@ def test_pit_restore(project_one: OMTester): project_one.create_restore_job_pit(pit_millis) - def test_mdb_ready(mongodb_multi_one: MongoDBMulti | MongoDB): + def test_mdb_ready(self, mongodb_multi_one: MongoDBMulti | MongoDB): # Note: that we are not waiting for the restore jobs to get finished as PIT restore jobs get FINISHED status # right away. # But the agent might still do work on the cluster, so we need to wait for that to happen. mongodb_multi_one.assert_reaches_phase(Phase.Pending) mongodb_multi_one.assert_reaches_phase(Phase.Running) - def test_data_got_restored(mongodb_multi_one_collection): + def test_data_got_restored(self, mongodb_multi_one_collection): assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=900) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py index 72bd99b99..28620bb2c 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py @@ -1,24 +1,23 @@ +from kubetester.kubetester import KubernetesTester from kubetester.mongodb import MongoDB, Phase from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator -def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() +class TestOIDCMultiCluster(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) -def test_create_oidc_replica_set(mongodb_multi: MongoDBMulti | MongoDB): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + def test_assert_connectivity(self, mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_oidc_authentication() - -def test_assert_connectivity(mongodb_multi: MongoDBMulti | MongoDB): - tester = mongodb_multi.tester() - tester.assert_oidc_authentication() - - -def test_ops_manager_state_updated_correctly(mongodb_multi: MongoDBMulti | MongoDB): - tester = mongodb_multi.get_automation_config_tester() - tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) - tester.assert_authentication_enabled(2) - tester.assert_expected_users(0) - tester.assert_authoritative_set(True) + def test_ops_manager_state_updated_correctly(self, mongodb_multi: 
MongoDBMulti | MongoDB): + tester = mongodb_multi.get_automation_config_tester() + tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) + tester.assert_authentication_enabled(2) + tester.assert_expected_users(0) + tester.assert_authoritative_set(True) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py index c58b770d3..ee01979e0 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py @@ -1,29 +1,27 @@ +from kubetester.kubetester import KubernetesTester from kubetester.mongodb import MongoDB, Phase from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser from kubetester.operator import Operator -def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() +class TestOIDCMultiCluster(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) -def test_create_oidc_replica_set(mongodb_multi: MongoDBMulti | MongoDB): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + def test_create_user(self, oidc_user: MongoDBUser): + oidc_user.assert_reaches_phase(Phase.Updated, timeout=800) + def test_assert_connectivity(self, mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_oidc_authentication() -def test_create_user(oidc_user: MongoDBUser): - oidc_user.assert_reaches_phase(Phase.Updated, timeout=800) - - -def test_assert_connectivity(mongodb_multi: MongoDBMulti | MongoDB): - tester = mongodb_multi.tester() - tester.assert_oidc_authentication() - - -def test_ops_manager_state_updated_correctly(mongodb_multi: MongoDBMulti | MongoDB): - tester = mongodb_multi.get_automation_config_tester() - tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) - tester.assert_authentication_enabled(2) - tester.assert_expected_users(1) - tester.assert_authoritative_set(True) + def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.get_automation_config_tester() + tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) + tester.assert_authentication_enabled(2) + tester.assert_expected_users(1) + tester.assert_authoritative_set(True) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py index a4d9af724..c2a8da08b 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py @@ -65,7 +65,7 @@ def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path class TestNonSequentialMemberIdsInReplicaSet(KubernetesTester): def test_scale_up_first_cluster( - mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] + self, mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: 
+        self, mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]
     ):
         # Scale up the first cluster to 3 members. This will lead to non-sequential member ids in the replicaset.
         # multi-replica-set-0-0 : 0
@@ -81,7 +81,7 @@ def test_scale_up_first_cluster(
         mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients)
         mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600)
 
-    def test_change_project(mongodb_multi: MongoDBMulti | MongoDB, new_project_configmap: str):
+    def test_change_project(self, mongodb_multi: MongoDBMulti | MongoDB, new_project_configmap: str):
         oldRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name)
 
         mongodb_multi["spec"]["opsManager"]["configMapRef"]["name"] = new_project_configmap
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py
index afd618483..1d4e34986 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py
@@ -6,37 +6,37 @@
 
 
 class TestWebhookValidation(KubernetesTester):
-    def test_deploy_operator(multi_cluster_operator: Operator):
+    def test_deploy_operator(self, multi_cluster_operator: Operator):
         multi_cluster_operator.assert_is_running()
 
-    def test_unique_cluster_names(central_cluster_client: kubernetes.client.ApiClient, fixture: str):
+    def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient, fixture: str):
         resource = yaml.safe_load(open(yaml_fixture(fixture)))
         resource["spec"]["clusterSpecList"].append({"clusterName": "kind-e2e-cluster-1", "members": 1})
 
-        KubernetesTester.create_custom_resource_from_object(
-            KubernetesTester.get_namespace(),
+        self.create_custom_resource_from_object(
+            self.get_namespace(),
             resource,
             exception_reason="Multiple clusters with the same name (kind-e2e-cluster-1) are not allowed",
             api_client=central_cluster_client,
         )
 
-    def test_only_one_schema(central_cluster_client: kubernetes.client.ApiClient, fixture: str):
+    def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient, fixture: str):
         resource = yaml.safe_load(open(yaml_fixture(fixture)))
         resource["spec"]["cloudManager"] = {"configMapRef": {"name": " my-project"}}
 
-        KubernetesTester.create_custom_resource_from_object(
-            KubernetesTester.get_namespace(),
+        self.create_custom_resource_from_object(
+            self.get_namespace(),
             resource,
             exception_reason="must validate one and only one schema",
             api_client=central_cluster_client,
         )
 
-    def test_non_empty_clusterspec_list(central_cluster_client: kubernetes.client.ApiClient, fixture: str):
+    def test_non_empty_clusterspec_list(self, central_cluster_client: kubernetes.client.ApiClient, fixture: str):
         resource = yaml.safe_load(open(yaml_fixture(fixture)))
         resource["spec"]["clusterSpecList"] = []
 
-        KubernetesTester.create_custom_resource_from_object(
-            KubernetesTester.get_namespace(),
+        self.create_custom_resource_from_object(
+            self.get_namespace(),
             resource,
             exception_reason="ClusterSpecList empty is not allowed, please define at least one cluster",
             api_client=central_cluster_client,
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py
index 51882dbf9..dddb83999 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py
@@ -25,6 +25,9 @@
 from tests.conftest import assert_data_got_restored
 from tests.constants import AWS_REGION
 from tests.multicluster.conftest import cluster_spec_list
+from tests.multicluster_appdb.shared import (
+    multicluster_appdb_s3_based_backup_restore as testhelper,
+)
 
 
 @fixture(scope="module")
@@ -106,48 +109,28 @@ def test_create_om(
         self,
         ops_manager: MongoDBOpsManager,
     ):
-        ops_manager["spec"]["backup"]["members"] = 1
-        ops_manager.update()
-
-        ops_manager.appdb_status().assert_reaches_phase(Phase.Running)
-        ops_manager.om_status().assert_reaches_phase(Phase.Running)
+        testhelper.TestOpsManagerCreation.test_create_om(self, ops_manager)
 
     def test_om_is_running(
         self,
         ops_manager: MongoDBOpsManager,
         central_cluster_client: kubernetes.client.ApiClient,
     ):
-        # at this point AppDB is used as the "metadatastore"
-        ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True)
-        om_tester = ops_manager.get_om_tester(api_client=central_cluster_client)
-        om_tester.assert_healthiness()
+        testhelper.TestOpsManagerCreation.test_om_is_running(self, ops_manager, central_cluster_client)
 
     def test_add_metadatastore(
         self,
         multi_cluster_s3_replica_set: MongoDBMulti,
         ops_manager: MongoDBOpsManager,
     ):
-        multi_cluster_s3_replica_set.assert_reaches_phase(Phase.Running, timeout=1000)
-
-        # configure metadatastore in om, use dedicate MDB instead of AppDB
-        ops_manager.load()
-        ops_manager["spec"]["backup"]["s3Stores"][0]["mongodbResourceRef"] = {"name": multi_cluster_s3_replica_set.name}
-        ops_manager["spec"]["backup"]["s3OpLogStores"][0]["mongodbResourceRef"] = {
-            "name": multi_cluster_s3_replica_set.name
-        }
-        ops_manager.update()
-
-        ops_manager.om_status().assert_reaches_phase(Phase.Running)
-        ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True)
+        testhelper.TestOpsManagerCreation.test_add_metadatastore(self, multi_cluster_s3_replica_set, ops_manager)
 
     def test_om_s3_stores(
         self,
         ops_manager: MongoDBOpsManager,
         central_cluster_client: kubernetes.client.ApiClient,
     ):
-        om_tester = ops_manager.get_om_tester(api_client=central_cluster_client)
-        om_tester.assert_s3_stores([{"id": S3_BLOCKSTORE_NAME, "s3RegionOverride": AWS_REGION}])
-        om_tester.assert_oplog_s3_stores([{"id": S3_OPLOG_NAME, "s3RegionOverride": AWS_REGION}])
+        testhelper.TestOpsManagerCreation.test_om_s3_stores(self, ops_manager, central_cluster_client)
 
 
 @mark.e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore
@@ -202,38 +185,20 @@ def mongodb_multi_one(
         return resource.update()
 
     def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti):
-        # we might fail connection in the beginning since we set a custom dns in coredns
-        mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=600)
+        testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(self, mongodb_multi_one)
 
     @pytest.mark.flaky(reruns=100, reruns_delay=6)
     def test_add_test_data(self, mongodb_multi_one_collection):
-        mongodb_multi_one_collection.insert_one(TEST_DATA)
+        testhelper.TestBackupForMongodb.test_add_test_data(self, mongodb_multi_one_collection)
 
     def test_mdb_backed_up(self, project_one: OMTester):
-        project_one.wait_until_backup_snapshots_are_ready(expected_count=1)
+        testhelper.TestBackupForMongodb.test_mdb_backed_up(self, project_one)
 
     def test_change_mdb_data(self, mongodb_multi_one_collection):
-        now_millis = time_to_millis(datetime.datetime.now())
-        print("\nCurrent time (millis): {}".format(now_millis))
-        time.sleep(30)
-        mongodb_multi_one_collection.insert_one({"foo": "bar"})
+        testhelper.TestBackupForMongodb.test_change_mdb_data(self, mongodb_multi_one_collection)
 
     def test_pit_restore(self, project_one: OMTester):
-        now_millis = time_to_millis(datetime.datetime.now())
-        print("\nCurrent time (millis): {}".format(now_millis))
-
-        pit_datetme = datetime.datetime.now() - datetime.timedelta(seconds=15)
-        pit_millis = time_to_millis(pit_datetme)
-        print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis))
-
-        project_one.create_restore_job_pit(pit_millis)
+        testhelper.TestBackupForMongodb.test_pit_restore(self, project_one)
 
     def test_data_got_restored(self, mongodb_multi_one_collection):
-        assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200)
-
-
-def time_to_millis(date_time) -> int:
-    """https://stackoverflow.com/a/11111177/614239"""
-    epoch = datetime.datetime.utcfromtimestamp(0)
-    pit_millis = (date_time - epoch).total_seconds() * 1000
-    return pit_millis
+        testhelper.TestBackupForMongodb.test_data_got_restored(self, mongodb_multi_one_collection)
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/__init__.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/multicluster_appdb_s3_based_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/multicluster_appdb_s3_based_backup_restore.py
new file mode 100644
index 000000000..1190abdf0
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/multicluster_appdb_s3_based_backup_restore.py
@@ -0,0 +1,125 @@
+import datetime
+import time
+
+import kubernetes.client
+import pymongo
+import pytest
+from kubetester import create_or_update_configmap
+from kubetester.mongodb_multi import MongoDBMulti
+from kubetester.omtester import OMTester
+from kubetester.opsmanager import MongoDBOpsManager
+from kubetester.phase import Phase
+from tests.common.constants import (
+    S3_BLOCKSTORE_NAME,
+    S3_OPLOG_NAME,
+    TEST_DATA,
+)
+from tests.conftest import assert_data_got_restored
+from tests.constants import AWS_REGION
+
+
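+# Builds the ConfigMap that points a MongoDB resource at an Ops Manager project:
+# OM base URL, project name, the custom CA used for OM TLS, and an empty org id.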
+ """ + + def test_create_om( + self, + ops_manager: MongoDBOpsManager, + ): + ops_manager["spec"]["backup"]["members"] = 1 + ops_manager.update() + + ops_manager.appdb_status().assert_reaches_phase(Phase.Running) + ops_manager.om_status().assert_reaches_phase(Phase.Running) + + def test_om_is_running( + self, + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + ): + # at this point AppDB is used as the "metadatastore" + ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True) + om_tester = ops_manager.get_om_tester(api_client=central_cluster_client) + om_tester.assert_healthiness() + + def test_add_metadatastore( + self, + multi_cluster_s3_replica_set: MongoDBMulti, + ops_manager: MongoDBOpsManager, + ): + multi_cluster_s3_replica_set.assert_reaches_phase(Phase.Running, timeout=1000) + + # configure metadatastore in om, use dedicate MDB instead of AppDB + ops_manager.load() + ops_manager["spec"]["backup"]["s3Stores"][0]["mongodbResourceRef"] = {"name": multi_cluster_s3_replica_set.name} + ops_manager["spec"]["backup"]["s3OpLogStores"][0]["mongodbResourceRef"] = { + "name": multi_cluster_s3_replica_set.name + } + ops_manager.update() + + ops_manager.om_status().assert_reaches_phase(Phase.Running) + ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True) + + def test_om_s3_stores( + self, + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + ): + om_tester = ops_manager.get_om_tester(api_client=central_cluster_client) + om_tester.assert_s3_stores([{"id": S3_BLOCKSTORE_NAME, "s3RegionOverride": AWS_REGION}]) + om_tester.assert_oplog_s3_stores([{"id": S3_OPLOG_NAME, "s3RegionOverride": AWS_REGION}]) + + +class TestBackupForMongodb: + + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): + # we might fail connection in the beginning since we set a custom dns in coredns + mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=600) + + @pytest.mark.flaky(reruns=100, reruns_delay=6) + def test_add_test_data(self, mongodb_multi_one_collection): + mongodb_multi_one_collection.insert_one(TEST_DATA) + + def test_mdb_backed_up(self, project_one: OMTester): + project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + + def test_change_mdb_data(self, mongodb_multi_one_collection): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + time.sleep(30) + mongodb_multi_one_collection.insert_one({"foo": "bar"}) + + def test_pit_restore(self, project_one: OMTester): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + + pit_datetme = datetime.datetime.now() - datetime.timedelta(seconds=15) + pit_millis = time_to_millis(pit_datetme) + print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis)) + + project_one.create_restore_job_pit(pit_millis) + + def test_data_got_restored(self, mongodb_multi_one_collection): + assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200) + + +def time_to_millis(date_time) -> int: + """https://stackoverflow.com/a/11111177/614239""" + epoch = datetime.datetime.utcfromtimestamp(0) + pit_millis = (date_time - epoch).total_seconds() * 1000 + return pit_millis From 5aed8875e6aeb0f07bea8ddf66d3c1ee7a19484d Mon Sep 17 00:00:00 2001 From: filipcirtog Date: Wed, 26 Nov 2025 12:14:59 +0100 Subject: [PATCH 11/13] revert unused 
+def time_to_millis(date_time) -> int:
+    """https://stackoverflow.com/a/11111177/614239"""
+    epoch = datetime.datetime.utcfromtimestamp(0)
+    pit_millis = (date_time - epoch).total_seconds() * 1000
+    return pit_millis
From 5aed8875e6aeb0f07bea8ddf66d3c1ee7a19484d Mon Sep 17 00:00:00 2001
From: filipcirtog
Date: Wed, 26 Nov 2025 12:14:59 +0100
Subject: [PATCH 11/13] revert unused changes

---
 .../tests/multicluster/multi_cluster_reconcile_races.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py
index d605ccf82..0860a511a 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py
@@ -70,14 +70,14 @@ def get_replica_set(ops_manager, namespace: str, idx: int) -> MongoDB:
     return resource
 
 
-def get_mdbmc(ops_manager, namespace: str, idx: int) -> MongoDBMulti | MongoDB:
+def get_mdbmc(ops_manager, namespace: str, idx: int) -> MongoDBMulti:
     name = f"mdb-{idx}-mc"
-    resourceName = f"mongodbmulticluster-multi-cluster.yaml"
     resource = MongoDBMulti.from_yaml(
-        yaml_fixture(resourceName),
+        yaml_fixture("mongodbmulticluster-multi-cluster.yaml"),
         namespace=namespace,
         name=name,
    ).configure(ops_manager, name, api_client=get_central_cluster_client())
+
     try_load(resource)
     return resource
 
@@ -268,4 +268,4 @@ def test_telemetry_configmap(namespace: str):
         assert isinstance(payload, list), "payload should be a list"
         assert len(payload) > 0, "payload should not be empty"
     except json.JSONDecodeError:
-        pytest.fail("payload contains invalid JSON data")
+        pytest.fail("payload contains invalid JSON data")
\ No newline at end of file
From 918f9a913f868a2eda8a5832826022803e848943 Mon Sep 17 00:00:00 2001
From: filipcirtog
Date: Wed, 26 Nov 2025 13:48:59 +0100
Subject: [PATCH 12/13] bug fix

---
 .../tests/multicluster/shared/multi_cluster_backup_restore.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py
index 1f917a092..22683f35c 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py
@@ -59,6 +59,7 @@ class TestOpsManagerCreation:
     """
 
     def test_create_om(
+        self,
         ops_manager: MongoDBOpsManager,
     ):
         ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class()
@@ -73,6 +74,7 @@ def test_create_om(
         )
 
     def test_daemon_statefulset(
+        self, 
         ops_manager: MongoDBOpsManager,
     ):
         def stateful_set_becomes_ready():
@@ -88,6 +90,7 @@ def stateful_set_becomes_ready():
         )
 
     def test_backup_daemon_services_created(
+        self, 
         namespace,
         central_cluster_client: kubernetes.client.ApiClient,
     ):
From fa59e99bfb815a63431e35e75aa0c0eb2b8a23f0 Mon Sep 17 00:00:00 2001
From: filipcirtog
Date: Wed, 26 Nov 2025 15:53:03 +0100
Subject: [PATCH 13/13] fix: linter

---
 .../tests/multicluster/multi_cluster_reconcile_races.py       | 2 +-
 .../tests/multicluster/shared/multi_cluster_backup_restore.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py
index 0860a511a..5e4dc5e6e 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py
@@ -268,4 +268,4 @@ def test_telemetry_configmap(namespace: str):
         assert isinstance(payload, list), "payload should be a list"
         assert len(payload) > 0, "payload should not be empty"
     except json.JSONDecodeError:
-        pytest.fail("payload contains invalid JSON data")
\ No newline at end of file
+        pytest.fail("payload contains invalid JSON data")
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py
index 22683f35c..d118e2b66 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py
@@ -74,7 +74,7 @@ def test_create_om(
         )
 
     def test_daemon_statefulset(
-        self, 
+        self,
         ops_manager: MongoDBOpsManager,
     ):
         def stateful_set_becomes_ready():
@@ -90,7 +90,7 @@ def stateful_set_becomes_ready():
         )
 
     def test_backup_daemon_services_created(
-        self, 
+        self,
         namespace,
         central_cluster_client: kubernetes.client.ApiClient,
     ):