
Commit 36f1085

CLOUDP-324440 Save rs member ids (#206)
# Summary

This PR fixes the behaviour of changing the project in a MongoDBMultiCluster resource. Multi-cluster resources compute the member ids of each process in a replica set (the `replicaSets` field in the automation config, AC) differently, so scaling operations can produce non-sequential ids. For example, we start from this:

* mongodb-0-0: id 0
* mongodb-0-1: id 1
* mongodb-1-0: id 2

If we now scale the first cluster up, we end up with:

* mongodb-0-0: id 0
* mongodb-0-1: id 1
* mongodb-1-0: id 2
* mongodb-0-2: id 3

If we then change the project, the operator finds an empty AC and recalculates the ids of the replica set members:

* mongodb-0-0: id 0
* mongodb-0-1: id 1
* mongodb-0-2: id 2
* mongodb-1-0: id 3

Member `0-2` now has id 2 instead of 3, and member `1-0` has id 3 instead of 2. This instructs the agents to change the members of the replica set, but the change is rejected, leaving the deployment stuck. The operator cannot retrieve the previous member ids.

With this PR, we save the member ids we achieved in an annotation, and whenever there is an empty AC in OM, we try to read the annotation in case the deployment is being migrated to a new project (a sketch of the saved annotation payload follows the checklist below).

### Other considerations

* The id of a replica set member has to be an integer, so we can't use the hostnames instead.
* A future improvement to the id computation could use a hash of the cluster index and the pod index. Any id the operator computes would then be consistent and would not require reading the AC. The issue is that changing the id of a replica set member is a convoluted process, and migrating the old pseudo-sequential ids to hashed ids is extremely complex. As long as there are still deployments that use these kinds of ids, we need to back them up somewhere in case the deployment is moved to a different project. This idea will be discussed further.
* If the `MongoDBMultiCluster` is backed up with its annotations and then re-applied to a new project, the annotation will be used to compute the member ids. This is not an issue: there are no restrictions on these ids apart from being integers and unique. After the initial deployment, any scaling operation will simply compute new sequential ids.

## Proof of Work

Added unit tests to check that the annotation is created and updated successfully, and that it is used when building the replica set. Also added an E2E test to ensure the resource becomes ready.

## Checklist

- [x] Have you linked a jira ticket and/or is the ticket in the title?
- [x] Have you checked whether your jira ticket required DOCSP changes?
- [x] Have you checked for release_note changes?
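The sketch below is not the operator's code; it only illustrates, with plain maps and `encoding/json`, the shape of the data this PR stores: member ids keyed by replica set name and then by process name, marshalled into a single annotation value. The annotation key and the resource/process names are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Member ids as achieved after the scale-up described above,
	// keyed by replica set name, then by process name.
	memberIds := map[string]map[string]int{
		"mongodb": {
			"mongodb-0-0": 0,
			"mongodb-0-1": 1,
			"mongodb-1-0": 2,
			"mongodb-0-2": 3, // non-sequential, but this is what was achieved in OM
		},
	}

	// The map is marshalled to JSON and stored as one annotation value on the
	// MongoDBMultiCluster resource (the key below is a placeholder for the
	// constant referenced as util.LastAchievedRsMemberIds in the diff).
	payload, err := json.Marshal(memberIds)
	if err != nil {
		panic(err)
	}
	annotations := map[string]string{"example/last-achieved-rs-member-ids": string(payload)}

	fmt.Println(annotations["example/last-achieved-rs-member-ids"])
	// {"mongodb":{"mongodb-0-0":0,"mongodb-0-1":1,"mongodb-0-2":3,"mongodb-1-0":2}}
}
```

When the automation config in the new project is empty, the reconciler reads this value back instead of recomputing sequential ids; the read side is sketched after the controller diff below.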
1 parent 9ec721a commit 36f1085

13 files changed: +283 -67 lines changed

RELEASE_NOTES.md
Lines changed: 3 additions & 0 deletions

@@ -25,6 +25,9 @@
 * [Manage Database Users using OIDC](https://www.mongodb.com/docs/kubernetes/upcoming/manage-users/) # TODO
 * [Authentication and Authorization with OIDC/OAuth 2.0](https://www.mongodb.com/docs/manual/core/oidc/security-oidc/)

+## Bug Fixes
+* Fixed an issue where moving a **MongoDBMultiCluster** resource to a new project (or a new OM instance) would leave the deployment in a failed state.
+
 <!-- Past Releases -->

 # MCK 1.1.0 Release Notes

controllers/om/automation_config_test.go
Lines changed: 1 addition & 1 deletion

@@ -1059,7 +1059,7 @@ func TestApplyInto(t *testing.T) {
 }

 func changeTypes(deployment Deployment) error {
-    rs := deployment.getReplicaSets()
+    rs := deployment.GetReplicaSets()
     deployment.setReplicaSets(rs)
     return nil
 }

controllers/om/deployment.go
Lines changed: 8 additions & 8 deletions

@@ -280,13 +280,13 @@ func (d Deployment) AddMonitoringAndBackup(log *zap.SugaredLogger, tls bool, caF
        d.addBackup(log)
    }

-// DEPRECATED: this shouldn't be used as it may panic because of different underlying type; use getReplicaSets instead
+// DEPRECATED: this shouldn't be used as it may panic because of different underlying type; use GetReplicaSets instead
 func (d Deployment) ReplicaSets() []ReplicaSet {
     return d["replicaSets"].([]ReplicaSet)
 }

 func (d Deployment) GetReplicaSetByName(name string) ReplicaSet {
-    for _, rs := range d.getReplicaSets() {
+    for _, rs := range d.GetReplicaSets() {
         if rs.Name() == name {
             return rs
         }

@@ -395,7 +395,7 @@ func (d Deployment) RemoveReplicaSetByName(name string, log *zap.SugaredLogger)
        return xerrors.New("ReplicaSet does not exist")
    }

-    currentRs := d.getReplicaSets()
+    currentRs := d.GetReplicaSets()
     toKeep := make([]ReplicaSet, len(currentRs)-1)
     i := 0
     for _, el := range currentRs {

@@ -685,7 +685,7 @@ func (d Deployment) ProcessesCopy() []Process {

 // ReplicaSetsCopy returns the COPY of replicasets in the deployment.
 func (d Deployment) ReplicaSetsCopy() []ReplicaSet {
-    return d.deepCopy().getReplicaSets()
+    return d.deepCopy().GetReplicaSets()
 }

 // ShardedClustersCopy returns the COPY of sharded clusters in the deployment.

@@ -958,7 +958,7 @@ func (d Deployment) getProcessByName(name string) *Process {
 }

 func (d Deployment) getReplicaSetByName(name string) *ReplicaSet {
-    for _, r := range d.getReplicaSets() {
+    for _, r := range d.GetReplicaSets() {
         if r.Name() == name {
             return &r
         }

@@ -977,7 +977,7 @@ func (d Deployment) getShardedClusterByName(name string) *ShardedCluster {
     return nil
 }

-func (d Deployment) getReplicaSets() []ReplicaSet {
+func (d Deployment) GetReplicaSets() []ReplicaSet {
     switch v := d["replicaSets"].(type) {
     case []ReplicaSet:
         return v

@@ -997,7 +997,7 @@ func (d Deployment) setReplicaSets(replicaSets []ReplicaSet) {
 }

 func (d Deployment) addReplicaSet(rs ReplicaSet) {
-    d.setReplicaSets(append(d.getReplicaSets(), rs))
+    d.setReplicaSets(append(d.GetReplicaSets(), rs))
 }

 func (d Deployment) getShardedClusters() []ShardedCluster {

@@ -1052,7 +1052,7 @@ func (d Deployment) findReplicaSetsRemovedFromShardedCluster(clusterName string)
     clusterReplicaSets := shardedCluster.getAllReplicaSets()
     var ans []string

-    for _, v := range d.getReplicaSets() {
+    for _, v := range d.GetReplicaSets() {
         if !stringutil.Contains(clusterReplicaSets, v.Name()) && isShardOfShardedCluster(clusterName, v.Name()) {
             ans = append(ans, v.Name())
         }
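The DEPRECATED comment above is the motivation for exporting the type-switch accessor: a direct type assertion on the untyped `replicaSets` value panics when the underlying type differs, while a type switch degrades gracefully. A standalone sketch with illustrative types (not the operator's Deployment or ReplicaSet types):

```go
package main

import "fmt"

func main() {
	// A Deployment behaves like a map[string]interface{}; depending on whether the
	// value was built in code or deserialized from JSON, "replicaSets" may hold
	// different underlying types (the types below are illustrative).
	d := map[string]interface{}{
		"replicaSets": []interface{}{map[string]interface{}{"_id": "rs0"}},
	}

	// Direct type assertion, as in the deprecated ReplicaSets(): panics if the
	// underlying type is []interface{} rather than the expected concrete slice.
	// rs := d["replicaSets"].([]map[string]interface{}) // would panic here

	// Type switch, as in GetReplicaSets(): handles each possible type explicitly.
	switch v := d["replicaSets"].(type) {
	case []map[string]interface{}:
		fmt.Println("concrete slice:", len(v))
	case []interface{}:
		fmt.Println("generic slice:", len(v)) // this branch runs
	default:
		fmt.Println("no replica sets")
	}
}
```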

controllers/om/deployment_test.go
Lines changed: 20 additions & 20 deletions

@@ -56,25 +56,25 @@ func TestMergeReplicaSet(t *testing.T) {
     expectedRs := buildRsByProcesses("fooRs", createReplicaSetProcesses("fooRs"))

     assert.Len(t, d.getProcesses(), 3)
-    assert.Len(t, d.getReplicaSets(), 1)
-    assert.Len(t, d.getReplicaSets()[0].Members(), 3)
-    assert.Equal(t, d.getReplicaSets()[0], expectedRs.Rs)
+    assert.Len(t, d.GetReplicaSets(), 1)
+    assert.Len(t, d.GetReplicaSets()[0].Members(), 3)
+    assert.Equal(t, d.GetReplicaSets()[0], expectedRs.Rs)

     // Now the deployment "gets updated" from external - new node is added and one is removed - this should be fixed
     // by merge
     newProcess := NewMongodProcess("foo", "bar", "fake-mongoDBImage", false, &mdbv1.AdditionalMongodConfig{}, &mdbv1.NewStandaloneBuilder().Build().Spec, "", nil, "")

     d.getProcesses()[0]["processType"] = ProcessTypeMongos // this will be overriden
     d.getProcesses()[1].EnsureNetConfig()["MaxIncomingConnections"] = 20 // this will be left as-is
-    d.getReplicaSets()[0]["protocolVersion"] = 10 // this field will be overriden by Operator
-    d.getReplicaSets()[0].setMembers(d.getReplicaSets()[0].Members()[0:2]) // "removing" the last node in replicaset
-    d.getReplicaSets()[0].addMember(newProcess, "", automationconfig.MemberOptions{}) // "adding" some new node
-    d.getReplicaSets()[0].Members()[0]["arbiterOnly"] = true // changing data for first node
+    d.GetReplicaSets()[0]["protocolVersion"] = 10 // this field will be overriden by Operator
+    d.GetReplicaSets()[0].setMembers(d.GetReplicaSets()[0].Members()[0:2]) // "removing" the last node in replicaset
+    d.GetReplicaSets()[0].addMember(newProcess, "", automationconfig.MemberOptions{}) // "adding" some new node
+    d.GetReplicaSets()[0].Members()[0]["arbiterOnly"] = true // changing data for first node

     mergeReplicaSet(d, "fooRs", createReplicaSetProcesses("fooRs"))

     assert.Len(t, d.getProcesses(), 3)
-    assert.Len(t, d.getReplicaSets(), 1)
+    assert.Len(t, d.GetReplicaSets(), 1)

     expectedRs = buildRsByProcesses("fooRs", createReplicaSetProcesses("fooRs"))
     expectedRs.Rs.Members()[0]["arbiterOnly"] = true

@@ -89,14 +89,14 @@ func TestMergeReplica_ScaleDown(t *testing.T) {

     mergeReplicaSet(d, "someRs", createReplicaSetProcesses("someRs"))
     assert.Len(t, d.getProcesses(), 3)
-    assert.Len(t, d.getReplicaSets()[0].Members(), 3)
+    assert.Len(t, d.GetReplicaSets()[0].Members(), 3)

     // "scale down"
     scaledDownRsProcesses := createReplicaSetProcesses("someRs")[0:2]
     mergeReplicaSet(d, "someRs", scaledDownRsProcesses)

     assert.Len(t, d.getProcesses(), 2)
-    assert.Len(t, d.getReplicaSets()[0].Members(), 2)
+    assert.Len(t, d.GetReplicaSets()[0].Members(), 2)

     // checking that the last member was removed
     rsProcesses := buildRsByProcesses("someRs", createReplicaSetProcesses("someRs")).Processes

@@ -123,7 +123,7 @@ func TestMergeReplicaSet_MergeFirstProcess(t *testing.T) {
     mergeReplicaSet(d, "fooRs", createReplicaSetProcessesCount(5, "fooRs"))

     assert.Len(t, d.getProcesses(), 8)
-    assert.Len(t, d.getReplicaSets(), 2)
+    assert.Len(t, d.GetReplicaSets(), 2)

     expectedRs := buildRsByProcesses("fooRs", createReplicaSetProcessesCount(5, "fooRs"))

@@ -177,8 +177,8 @@ func TestMergeDeployment_BigReplicaset(t *testing.T) {
     checkNumberOfVotingMembers(t, rs, 7, 8)

     // Now OM user "has changed" votes for some of the members - this must stay the same after merge
-    omDeployment.getReplicaSets()[0].Members()[2].setVotes(0).setPriority(0)
-    omDeployment.getReplicaSets()[0].Members()[4].setVotes(0).setPriority(0)
+    omDeployment.GetReplicaSets()[0].Members()[2].setVotes(0).setPriority(0)
+    omDeployment.GetReplicaSets()[0].Members()[4].setVotes(0).setPriority(0)

     omDeployment.MergeReplicaSet(rs, nil, nil, zap.S())
     checkNumberOfVotingMembers(t, rs, 5, 8)

@@ -199,10 +199,10 @@ func TestMergeDeployment_BigReplicaset(t *testing.T) {

     omDeployment.MergeReplicaSet(rsToMerge, nil, nil, zap.S())
     checkNumberOfVotingMembers(t, rs, 7, 11)
-    assert.Equal(t, 0, omDeployment.getReplicaSets()[0].Members()[2].Votes())
-    assert.Equal(t, 0, omDeployment.getReplicaSets()[0].Members()[4].Votes())
-    assert.Equal(t, float32(0), omDeployment.getReplicaSets()[0].Members()[2].Priority())
-    assert.Equal(t, float32(0), omDeployment.getReplicaSets()[0].Members()[4].Priority())
+    assert.Equal(t, 0, omDeployment.GetReplicaSets()[0].Members()[2].Votes())
+    assert.Equal(t, 0, omDeployment.GetReplicaSets()[0].Members()[4].Votes())
+    assert.Equal(t, float32(0), omDeployment.GetReplicaSets()[0].Members()[2].Priority())
+    assert.Equal(t, float32(0), omDeployment.GetReplicaSets()[0].Members()[4].Priority())
 }

 func TestGetAllProcessNames_MergedReplicaSetsAndShardedClusters(t *testing.T) {

@@ -360,7 +360,7 @@ func TestGetNumberOfExcessProcesses_ShardedClusterScaleDown(t *testing.T) {
     _, err := d.MergeShardedCluster(mergeOpts)
     assert.NoError(t, err)
     assert.Len(t, d.getShardedClusterByName("sc001").shards(), 3)
-    assert.Len(t, d.getReplicaSets(), 4)
+    assert.Len(t, d.GetReplicaSets(), 4)
     assert.Equal(t, 0, d.GetNumberOfExcessProcesses("sc001"))

     // Now we are "scaling down" the sharded cluster - so junk replica sets will appear - this is still ok

@@ -377,7 +377,7 @@
     _, err = d.MergeShardedCluster(mergeOpts)
     assert.NoError(t, err)
     assert.Len(t, d.getShardedClusterByName("sc001").shards(), 2)
-    assert.Len(t, d.getReplicaSets(), 4)
+    assert.Len(t, d.GetReplicaSets(), 4)

     assert.Equal(t, 0, d.GetNumberOfExcessProcesses("sc001"))
 }

@@ -586,7 +586,7 @@ func checkShardedClusterCheckExtraReplicaSets(t *testing.T, d Deployment, expect
     // checking that no previous replica sets are left. For this we take the name of first shard and remove the last digit
     firstShardName := expectedReplicaSets[0].Rs.Name()
     i := 0
-    for _, r := range d.getReplicaSets() {
+    for _, r := range d.GetReplicaSets() {
         if strings.HasPrefix(r.Name(), firstShardName[0:len(firstShardName)-1]) {
             i++
         }

controllers/om/depshardedcluster_test.go
Lines changed: 7 additions & 7 deletions

@@ -31,9 +31,9 @@ func TestMergeShardedCluster_New(t *testing.T) {
     assert.NoError(t, err)

     require.Len(t, d.getProcesses(), 15)
-    require.Len(t, d.getReplicaSets(), 4)
+    require.Len(t, d.GetReplicaSets(), 4)
     for i := 0; i < 4; i++ {
-        require.Len(t, d.getReplicaSets()[i].Members(), 3)
+        require.Len(t, d.GetReplicaSets()[i].Members(), 3)
     }
     checkMongoSProcesses(t, d.getProcesses(), createMongosProcesses(3, "pretty", "cluster"))
     checkReplicaSet(t, d, createConfigSrvRs("configSrv", true))

@@ -130,9 +130,9 @@ func TestMergeShardedCluster_ReplicaSetsModified(t *testing.T) {
     expectedShards[0].Rs["writeConcernMajorityJournalDefault"] = true

     require.Len(t, d.getProcesses(), 15)
-    require.Len(t, d.getReplicaSets(), 4)
+    require.Len(t, d.GetReplicaSets(), 4)
     for i := 0; i < 4; i++ {
-        require.Len(t, d.getReplicaSets()[i].Members(), 3)
+        require.Len(t, d.GetReplicaSets()[i].Members(), 3)
     }
     checkMongoSProcesses(t, d.getProcesses(), createMongosProcesses(3, "pretty", "cluster"))
     checkReplicaSet(t, d, createConfigSrvRs("configSrv", true))

@@ -166,7 +166,7 @@ func TestMergeShardedCluster_ShardedClusterModified(t *testing.T) {

     mergeReplicaSet(d, "fakeShard", createReplicaSetProcesses("fakeShard"))

-    require.Len(t, d.getReplicaSets(), 5)
+    require.Len(t, d.GetReplicaSets(), 5)

     // Final check - we create the expected configuration, add there correct OM changes and check for equality with merge
     // result

@@ -188,9 +188,9 @@
     // Note, that fake replicaset and it's processes haven't disappeared as we passed 'false' to 'MergeShardedCluster'
     // which results in "draining" for redundant shards but not physical removal of replica sets
     require.Len(t, d.getProcesses(), 18)
-    require.Len(t, d.getReplicaSets(), 5)
+    require.Len(t, d.GetReplicaSets(), 5)
     for i := 0; i < 4; i++ {
-        require.Len(t, d.getReplicaSets()[i].Members(), 3)
+        require.Len(t, d.GetReplicaSets()[i].Members(), 3)
     }
     checkMongoSProcesses(t, d.getProcesses(), createMongosProcesses(3, "pretty", "cluster"))
     checkReplicaSet(t, d, createConfigSrvRs("configSrv", true))

controllers/om/mockedomclient.go
Lines changed: 1 addition & 1 deletion

@@ -734,7 +734,7 @@ func (oc *MockedOmConnection) CheckResourcesAndBackupDeleted(t *testing.T, resou
     // This can be improved for some more complicated scenarios when we have different resources in parallel - so far
     // just checking if deployment
     assert.Empty(t, oc.deployment.getProcesses())
-    assert.Empty(t, oc.deployment.getReplicaSets())
+    assert.Empty(t, oc.deployment.GetReplicaSets())
     assert.Empty(t, oc.deployment.getShardedClusters())
     assert.Empty(t, oc.deployment.getMonitoringVersions())
     assert.Empty(t, oc.deployment.getBackupVersions())

controllers/om/omclient.go
Lines changed: 15 additions & 0 deletions

@@ -978,6 +978,21 @@ func (oc *HTTPOmConnection) AddPreferredHostname(agentApiKey string, value strin
     return nil
 }

+func GetReplicaSetMemberIds(conn Connection) (map[string]map[string]int, error) {
+    dep, err := conn.ReadDeployment()
+    if err != nil {
+        return nil, err
+    }
+
+    finalProcessIds := make(map[string]map[string]int)
+
+    for _, replicaSet := range dep.GetReplicaSets() {
+        finalProcessIds[replicaSet.Name()] = replicaSet.MemberIds()
+    }
+
+    return finalProcessIds, nil
+}
+
 //********************************** Private methods *******************************************************************

 func (oc *HTTPOmConnection) get(path string) ([]byte, error) {

controllers/om/replicaset.go
Lines changed: 8 additions & 0 deletions

@@ -146,6 +146,14 @@ func (r ReplicaSet) String() string {
     return fmt.Sprintf("\"%s\" (members: %v)", r.Name(), r.Members())
 }

+func (r ReplicaSet) MemberIds() map[string]int {
+    memberIds := make(map[string]int)
+    for _, rsMember := range r.Members() {
+        memberIds[rsMember.Name()] = rsMember.Id()
+    }
+    return memberIds
+}
+
 // ***************************************** Private methods ***********************************************************

 func initDefaultRs(set ReplicaSet, name string, protocolVersion string) {
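Together, `MemberIds` above and `GetReplicaSetMemberIds` from omclient.go turn the deployment into the `map[string]map[string]int` that gets persisted. A standalone sketch of that aggregation, with plain maps standing in for the package's ReplicaSet and member types (the `"host"`/`"_id"` field names are an assumption based on the automation config format):

```go
package main

import "fmt"

// memberIds mirrors what ReplicaSet.MemberIds does: map each member's name to its id.
func memberIds(members []map[string]interface{}) map[string]int {
	ids := make(map[string]int)
	for _, m := range members {
		ids[m["host"].(string)] = m["_id"].(int)
	}
	return ids
}

func main() {
	// Plain data standing in for the replica sets read from the deployment.
	replicaSets := map[string][]map[string]interface{}{
		"mongodb": {
			{"host": "mongodb-0-0", "_id": 0},
			{"host": "mongodb-0-1", "_id": 1},
			{"host": "mongodb-1-0", "_id": 2},
		},
	}

	// GetReplicaSetMemberIds aggregates this per replica set.
	finalProcessIds := make(map[string]map[string]int)
	for name, members := range replicaSets {
		finalProcessIds[name] = memberIds(members)
	}
	fmt.Println(finalProcessIds) // map[mongodb:map[mongodb-0-0:0 mongodb-0-1:1 mongodb-1-0:2]]
}
```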

controllers/operator/mongodbmultireplicaset_controller.go
Lines changed: 37 additions & 2 deletions

@@ -201,8 +201,13 @@ func (r *ReconcileMongoDbMultiReplicaSet) Reconcile(ctx context.Context, request
        return r.updateStatus(ctx, &mrs, status, log)
    }

+    finalMemberIds, err := om.GetReplicaSetMemberIds(conn)
+    if err != nil {
+        return r.updateStatus(ctx, &mrs, workflow.Failed(err), log)
+    }
+
     mrs.Status.FeatureCompatibilityVersion = mrs.CalculateFeatureCompatibilityVersion()
-    if err := r.saveLastAchievedSpec(ctx, mrs); err != nil {
+    if err := r.saveLastAchievedSpec(ctx, mrs, finalMemberIds); err != nil {
         return r.updateStatus(ctx, &mrs, workflow.Failed(xerrors.Errorf("Failed to set annotation: %w", err)), log)
     }

@@ -627,7 +632,7 @@ func getMembersForClusterSpecItemThisReconciliation(mrs *mdbmultiv1.MongoDBMulti
 }

 // saveLastAchievedSpec updates the MongoDBMultiCluster resource with the spec that was just achieved.
-func (r *ReconcileMongoDbMultiReplicaSet) saveLastAchievedSpec(ctx context.Context, mrs mdbmultiv1.MongoDBMultiCluster) error {
+func (r *ReconcileMongoDbMultiReplicaSet) saveLastAchievedSpec(ctx context.Context, mrs mdbmultiv1.MongoDBMultiCluster, rsMemberIds map[string]map[string]int) error {
     clusterSpecs, err := mrs.GetClusterSpecItems()
     if err != nil {
         return err

@@ -657,6 +662,16 @@ func (r *ReconcileMongoDbMultiReplicaSet) saveLastAchievedSpec(ctx context.Conte
        annotationsToAdd[mdbmultiv1.LastClusterNumMapping] = string(clusterNumBytes)
    }

+    if len(rsMemberIds) > 0 {
+        rsMemberIdsBytes, err := json.Marshal(rsMemberIds)
+        if err != nil {
+            return err
+        }
+        if len(rsMemberIdsBytes) > 0 {
+            annotationsToAdd[util.LastAchievedRsMemberIds] = string(rsMemberIdsBytes)
+        }
+    }
+
     return annotations.SetAnnotations(ctx, &mrs, annotationsToAdd, r.client)
 }

@@ -699,6 +714,15 @@ func (r *ReconcileMongoDbMultiReplicaSet) updateOmDeploymentRs(ctx context.Conte
     }

     processIds := getReplicaSetProcessIdsFromReplicaSets(mrs.Name, existingDeployment)
+
+    // If there is no replicaset configuration saved in OM, it might be a new project, so we check the ids saved in annotation
+    // A project migration can happen if .spec.opsManager.configMapRef is changed, or the original configMap has been modified.
+    if len(processIds) == 0 {
+        processIds, err = getReplicaSetProcessIdsFromAnnotation(mrs)
+        if err != nil {
+            return xerrors.Errorf("failed to get member ids from annotation: %w", err)
+        }
+    }
     log.Debugf("Existing process Ids: %+v", processIds)

     certificateFileName := ""

@@ -794,6 +818,17 @@ func getReplicaSetProcessIdsFromReplicaSets(replicaSetName string, deployment om
     return processIds
 }

+func getReplicaSetProcessIdsFromAnnotation(mrs mdbmultiv1.MongoDBMultiCluster) (map[string]int, error) {
+    if processIdsStr, ok := mrs.Annotations[util.LastAchievedRsMemberIds]; ok {
+        processIds := make(map[string]map[string]int)
+        if err := json.Unmarshal([]byte(processIdsStr), &processIds); err != nil {
+            return map[string]int{}, err
+        }
+        return processIds[mrs.Name], nil
+    }
+    return make(map[string]int), nil
+}
+
 func getSRVService(mrs *mdbmultiv1.MongoDBMultiCluster) corev1.Service {
     additionalConfig := mrs.Spec.GetAdditionalMongodConfig()
     port := additionalConfig.GetPortOrDefault()
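For reference, a standalone sketch of the fallback order implemented in `updateOmDeploymentRs` and `getReplicaSetProcessIdsFromAnnotation`, with plain maps standing in for the deployment and the custom resource (the annotation key and names are placeholders): ids already present in Ops Manager win, the annotation is only consulted when the project has no replica set configuration yet, and a missing annotation yields an empty map so ids are computed from scratch.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// idsFromAnnotation mirrors getReplicaSetProcessIdsFromAnnotation: it returns the saved
// ids for one resource, or an empty map when the annotation has never been written.
func idsFromAnnotation(annotations map[string]string, key, resourceName string) (map[string]int, error) {
	raw, ok := annotations[key]
	if !ok {
		return map[string]int{}, nil
	}
	all := map[string]map[string]int{}
	if err := json.Unmarshal([]byte(raw), &all); err != nil {
		return map[string]int{}, err
	}
	return all[resourceName], nil
}

func main() {
	const key = "example/last-achieved-rs-member-ids" // placeholder for util.LastAchievedRsMemberIds
	annotations := map[string]string{key: `{"mongodb":{"mongodb-0-0":0,"mongodb-0-1":1,"mongodb-1-0":2,"mongodb-0-2":3}}`}

	// Ids read from the (empty) automation config of the new project.
	processIds := map[string]int{}

	// Fallback, as in updateOmDeploymentRs: only when OM knows nothing about the replica set.
	if len(processIds) == 0 {
		var err error
		if processIds, err = idsFromAnnotation(annotations, key, "mongodb"); err != nil {
			panic(err)
		}
	}
	fmt.Println(processIds["mongodb-0-2"]) // 3 - preserved across the project change
}
```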
