Commit daf1f83

Add a new ring method to get all instances, and a new ruler method to get the ReplicationSet without requiring quorum.

Signed-off-by: Emmanuel Lodovice <[email protected]>
1 parent 00c188e commit daf1f83

File tree

9 files changed, +384 -122 lines

docs/configuration/config-file-reference.md

Lines changed: 1 addition & 1 deletion
@@ -4268,7 +4268,7 @@ ring:
   # with default state (state before any evaluation) and send this copy in list
   # API requests as backup in case the ruler who owns the rule fails to send its
   # rules. This allows the rules API to handle ruler outage by returning rules
-  # with default state. Ring replication-factor needs to be set to 3 or more for
+  # with default state. Ring replication-factor needs to be set to 2 or more for
   # this to be useful.
   # CLI flag: -experimental.ruler.api-enable-rules-backup
   [api_enable_rules_backup: <boolean> | default = false]

pkg/compactor/shuffle_sharding_grouper_test.go

Lines changed: 4 additions & 4 deletions
@@ -766,14 +766,14 @@ func (r *RingMock) GetInstanceDescsForOperation(op ring.Operation) (map[string]r
     return args.Get(0).(map[string]ring.InstanceDesc), args.Error(1)
 }
 
-func (r *RingMock) GetReplicationSetForOperation(op ring.Operation) (ring.ReplicationSet, error) {
+func (r *RingMock) GetAllInstanceDescs(op ring.Operation) ([]ring.InstanceDesc, []ring.InstanceDesc, error) {
     args := r.Called(op)
-    return args.Get(0).(ring.ReplicationSet), args.Error(1)
+    return args.Get(0).([]ring.InstanceDesc), make([]ring.InstanceDesc, 0), args.Error(1)
 }
 
-func (r *RingMock) GetReplicationSetForOperationWithNoQuorum(op ring.Operation) (ring.ReplicationSet, map[string]struct{}, error) {
+func (r *RingMock) GetReplicationSetForOperation(op ring.Operation) (ring.ReplicationSet, error) {
     args := r.Called(op)
-    return args.Get(0).(ring.ReplicationSet), make(map[string]struct{}), args.Error(1)
+    return args.Get(0).(ring.ReplicationSet), args.Error(1)
 }
 
 func (r *RingMock) ReplicationFactor() int {
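
Because GetAllInstanceDescs is added to the ReadRing interface (see pkg/ring/ring.go below), every test double that implements ReadRing, such as this compactor RingMock, must gain the new method. A minimal sketch of a compile-time assertion that catches a missing method at build time; the assertion is illustrative and not part of this commit:

package compactor

import "github.com/cortexproject/cortex/pkg/ring"

// Illustrative only: this declaration fails to compile if RingMock stops
// satisfying ring.ReadRing, e.g. when a method such as GetAllInstanceDescs
// is added to the interface without updating the mock.
var _ ring.ReadRing = &RingMock{}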

pkg/ring/ring.go

Lines changed: 42 additions & 45 deletions
@@ -50,6 +50,9 @@ type ReadRing interface {
     // of unhealthy instances is greater than the tolerated max unavailable.
     GetAllHealthy(op Operation) (ReplicationSet, error)
 
+    // GetAllInstanceDescs returns a slice of healthy and unhealthy InstanceDesc.
+    GetAllInstanceDescs(op Operation) ([]InstanceDesc, []InstanceDesc, error)
+
     // GetInstanceDescsForOperation returns map of InstanceDesc with instance ID as the keys.
     GetInstanceDescsForOperation(op Operation) (map[string]InstanceDesc, error)
 
@@ -59,13 +62,6 @@ type ReadRing interface {
     // the input operation.
     GetReplicationSetForOperation(op Operation) (ReplicationSet, error)
 
-    // GetReplicationSetForOperationWithNoQuorum returns all instances where the input operation should be executed.
-    // The resulting ReplicationSet contains all healthy instances in the ring, but the computation for MaxErrors
-    // does not require quorum so only 1 replica is needed to complete the operation. For MaxUnavailableZones, it is
-    // not automatically reduced when there are unhealthy instances in a zone because healthy instances in the zone
-    // are still returned, but the information about zones with unhealthy instances is returned.
-    GetReplicationSetForOperationWithNoQuorum(op Operation) (ReplicationSet, map[string]struct{}, error)
-
     ReplicationFactor() int
 
     // InstancesCount returns the number of instances in the ring.
@@ -471,6 +467,28 @@ func (r *Ring) GetAllHealthy(op Operation) (ReplicationSet, error) {
     }, nil
 }
 
+// GetAllInstanceDescs implements ReadRing.
+func (r *Ring) GetAllInstanceDescs(op Operation) ([]InstanceDesc, []InstanceDesc, error) {
+    r.mtx.RLock()
+    defer r.mtx.RUnlock()
+
+    if r.ringDesc == nil || len(r.ringDesc.Ingesters) == 0 {
+        return nil, nil, ErrEmptyRing
+    }
+    healthyInstances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters))
+    unhealthyInstances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters))
+    storageLastUpdate := r.KVClient.LastUpdateTime(r.key)
+    for _, instance := range r.ringDesc.Ingesters {
+        if r.IsHealthy(&instance, op, storageLastUpdate) {
+            healthyInstances = append(healthyInstances, instance)
+        } else {
+            unhealthyInstances = append(unhealthyInstances, instance)
+        }
+    }
+
+    return healthyInstances, unhealthyInstances, nil
+}
+
 // GetInstanceDescsForOperation implements ReadRing.
 func (r *Ring) GetInstanceDescsForOperation(op Operation) (map[string]InstanceDesc, error) {
     r.mtx.RLock()
@@ -491,12 +509,13 @@ func (r *Ring) GetInstanceDescsForOperation(op Operation) (map[string]InstanceDe
     return instanceDescs, nil
 }
 
-func (r *Ring) getReplicationSetForOperation(op Operation, requireQuorum bool) (ReplicationSet, map[string]struct{}, error) {
+// GetReplicationSetForOperation implements ReadRing.
+func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, error) {
     r.mtx.RLock()
     defer r.mtx.RUnlock()
 
     if r.ringDesc == nil || len(r.ringTokens) == 0 {
-        return ReplicationSet{}, make(map[string]struct{}), ErrEmptyRing
+        return ReplicationSet{}, ErrEmptyRing
     }
 
     // Build the initial replication set, excluding unhealthy instances.
@@ -518,24 +537,18 @@ func (r *Ring) getReplicationSetForOperation(op Operation, requireQuorum bool) (
     maxUnavailableZones := 0
 
     if r.cfg.ZoneAwarenessEnabled {
+        // Given data is replicated to RF different zones, we can tolerate a number of
+        // RF/2 failing zones. However, we need to protect from the case the ring currently
+        // contains instances in a number of zones < RF.
        numReplicatedZones := utilmath.Min(len(r.ringZones), r.cfg.ReplicationFactor)
-        if requireQuorum {
-            // Given data is replicated to RF different zones, we can tolerate a number of
-            // RF/2 failing zones. However, we need to protect from the case the ring currently
-            // contains instances in a number of zones < RF.
-            minSuccessZones := (numReplicatedZones / 2) + 1
-            maxUnavailableZones = minSuccessZones - 1
-        } else {
-            // Given that quorum is not required, we only need at least one of the zone to be healthy to succeed. But we
-            // also need to handle case when RF < number of zones.
-            maxUnavailableZones = numReplicatedZones - 1
-        }
+        minSuccessZones := (numReplicatedZones / 2) + 1
+        maxUnavailableZones = minSuccessZones - 1
 
         if len(zoneFailures) > maxUnavailableZones {
-            return ReplicationSet{}, zoneFailures, ErrTooManyUnhealthyInstances
+            return ReplicationSet{}, ErrTooManyUnhealthyInstances
         }
 
-        if requireQuorum && len(zoneFailures) > 0 {
+        if len(zoneFailures) > 0 {
             // We remove all instances (even healthy ones) from zones with at least
             // 1 failing instance. Due to how replication works when zone-awareness is
             // enabled (data is replicated to RF different zones), there's no benefit in
@@ -549,11 +562,11 @@ func (r *Ring) getReplicationSetForOperation(op Operation, requireQuorum bool) (
             }
 
             healthyInstances = filteredInstances
-
-            // Since we removed all instances from zones containing at least 1 failing
-            // instance, we have to decrease the max unavailable zones accordingly.
-            maxUnavailableZones -= len(zoneFailures)
         }
+
+        // Since we removed all instances from zones containing at least 1 failing
+        // instance, we have to decrease the max unavailable zones accordingly.
+        maxUnavailableZones -= len(zoneFailures)
     } else {
         // Calculate the number of required instances;
         // ensure we always require at least RF-1 when RF=3.
@@ -562,15 +575,10 @@ func (r *Ring) getReplicationSetForOperation(op Operation, requireQuorum bool) (
             numRequired = r.cfg.ReplicationFactor
         }
         // We can tolerate this many failures
-        if requireQuorum {
-            numRequired -= r.cfg.ReplicationFactor / 2
-        } else {
-            // if quorum is not required then 1 replica is enough to handle the request
-            numRequired -= r.cfg.ReplicationFactor - 1
-        }
+        numRequired -= r.cfg.ReplicationFactor / 2
 
         if len(healthyInstances) < numRequired {
-            return ReplicationSet{}, zoneFailures, ErrTooManyUnhealthyInstances
+            return ReplicationSet{}, ErrTooManyUnhealthyInstances
         }
 
         maxErrors = len(healthyInstances) - numRequired
@@ -580,18 +588,7 @@ func (r *Ring) getReplicationSetForOperation(op Operation, requireQuorum bool) (
         Instances:           healthyInstances,
         MaxErrors:           maxErrors,
         MaxUnavailableZones: maxUnavailableZones,
-    }, zoneFailures, nil
-}
-
-// GetReplicationSetForOperation implements ReadRing.
-func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, error) {
-    replicationSet, _, err := r.getReplicationSetForOperation(op, true)
-    return replicationSet, err
-}
-
-// GetReplicationSetForOperationWithNoQuorum implements ReadRing.
-func (r *Ring) GetReplicationSetForOperationWithNoQuorum(op Operation) (ReplicationSet, map[string]struct{}, error) {
-    return r.getReplicationSetForOperation(op, false)
+    }, nil
 }
 
 // countTokens returns the number of tokens and tokens within the range for each instance.
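
For orientation, here is a minimal sketch of how a caller might use the new GetAllInstanceDescs: unlike GetReplicationSetForOperation it performs no quorum or zone check, it simply splits the ring members into healthy and unhealthy slices. The function name and logging below are illustrative, not part of the commit.

package example

import (
	"log"

	"github.com/cortexproject/cortex/pkg/ring"
)

// logRingHealth is a hypothetical caller, shown only to illustrate the new API.
func logRingHealth(r ring.ReadRing, op ring.Operation) error {
	healthy, unhealthy, err := r.GetAllInstanceDescs(op)
	if err != nil {
		return err // the only error returned here is ErrEmptyRing
	}
	for _, inst := range unhealthy {
		// "Unhealthy" covers instances in a state the operation does not accept
		// as well as instances whose heartbeat has timed out.
		log.Printf("instance %s is unhealthy (state=%s)", inst.Addr, inst.State)
	}
	log.Printf("%d healthy / %d unhealthy ring members", len(healthy), len(unhealthy))
	return nil
}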

pkg/ring/ring_test.go

Lines changed: 42 additions & 65 deletions
@@ -959,11 +959,45 @@ func TestRing_GetInstanceDescsForOperation(t *testing.T) {
     }, instanceDescs)
 }
 
-func validateGetReplicationSetForOperation(t *testing.T, requireQuorum bool) {
+func TestRing_GetAllInstanceDescs(t *testing.T) {
+    now := time.Now().Unix()
+    twoMinutesAgo := time.Now().Add(-2 * time.Minute).Unix()
+
+    ringDesc := &Desc{Ingesters: map[string]InstanceDesc{
+        "instance-1": {Addr: "127.0.0.1", Tokens: []uint32{1}, State: ACTIVE, Timestamp: now},
+        "instance-2": {Addr: "127.0.0.2", Tokens: []uint32{2}, State: LEAVING, Timestamp: now},          // not healthy state
+        "instance-3": {Addr: "127.0.0.3", Tokens: []uint32{3}, State: ACTIVE, Timestamp: twoMinutesAgo}, // heartbeat timed out
+    }}
+
+    ring := Ring{
+        cfg:                 Config{HeartbeatTimeout: time.Minute},
+        ringDesc:            ringDesc,
+        ringTokens:          ringDesc.GetTokens(),
+        ringTokensByZone:    ringDesc.getTokensByZone(),
+        ringInstanceByToken: ringDesc.getTokensInfo(),
+        ringZones:           getZones(ringDesc.getTokensByZone()),
+        strategy:            NewDefaultReplicationStrategy(),
+        KVClient:            &MockClient{},
+    }
+
+    testOp := NewOp([]InstanceState{ACTIVE}, nil)
+
+    healthyInstanceDescs, unhealthyInstanceDescs, err := ring.GetAllInstanceDescs(testOp)
+    require.NoError(t, err)
+    require.EqualValues(t, []InstanceDesc{
+        {Addr: "127.0.0.1", Tokens: []uint32{1}, State: ACTIVE, Timestamp: now},
+    }, healthyInstanceDescs)
+    require.EqualValues(t, []InstanceDesc{
+        {Addr: "127.0.0.2", Tokens: []uint32{2}, State: LEAVING, Timestamp: now},
+        {Addr: "127.0.0.3", Tokens: []uint32{3}, State: ACTIVE, Timestamp: twoMinutesAgo},
+    }, unhealthyInstanceDescs)
+}
+
+func TestRing_GetReplicationSetForOperation(t *testing.T) {
     now := time.Now()
     g := NewRandomTokenGenerator()
 
-    type testCase struct {
+    tests := map[string]struct {
         ringInstances           map[string]InstanceDesc
         ringHeartbeatTimeout    time.Duration
         ringReplicationFactor   int
@@ -973,9 +1007,7 @@ func validateGetReplicationSetForOperation(t *testing.T, requireQuorum bool) {
         expectedSetForWrite     []string
         expectedErrForReporting error
         expectedSetForReporting []string
-    }
-
-    tests := map[string]testCase{
+    }{
         "should return error on empty ring": {
             ringInstances:        nil,
             ringHeartbeatTimeout: time.Minute,
@@ -1040,10 +1072,7 @@ func validateGetReplicationSetForOperation(t *testing.T, requireQuorum bool) {
             expectedSetForWrite:     []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4"},
             expectedSetForReporting: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4"},
         },
-    }
-
-    if requireQuorum {
-        tests["should fail on 2 unhealthy instances and RF=3"] = testCase{
+        "should fail on 2 unhealthy instances and RF=3": {
             ringInstances: map[string]InstanceDesc{
                 "instance-1": {Addr: "127.0.0.1", State: ACTIVE, Timestamp: now.Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-1", "", 128, true)},
                 "instance-2": {Addr: "127.0.0.2", State: ACTIVE, Timestamp: now.Add(-10 * time.Second).Unix(), Tokens: g.GenerateTokens(NewDesc(), "instance-2", "", 128, true)},
@@ -1056,36 +1085,7 @@ func validateGetReplicationSetForOperation(t *testing.T, requireQuorum bool) {
             expectedErrForRead:      ErrTooManyUnhealthyInstances,
             expectedErrForWrite:     ErrTooManyUnhealthyInstances,
             expectedErrForReporting: ErrTooManyUnhealthyInstances,
-        }
-    } else {
-        tests["should pass on 2 unhealthy instances and RF=3"] = testCase{
-            ringInstances: map[string]InstanceDesc{
-                "instance-1": {Addr: "127.0.0.1", State: ACTIVE, Timestamp: now.Unix(), Tokens: GenerateTokens(128, nil)},
-                "instance-2": {Addr: "127.0.0.2", State: ACTIVE, Timestamp: now.Add(-10 * time.Second).Unix(), Tokens: GenerateTokens(128, nil)},
-                "instance-3": {Addr: "127.0.0.3", State: ACTIVE, Timestamp: now.Add(-20 * time.Second).Unix(), Tokens: GenerateTokens(128, nil)},
-                "instance-4": {Addr: "127.0.0.4", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)},
-                "instance-5": {Addr: "127.0.0.5", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)},
-            },
-            ringHeartbeatTimeout:    time.Minute,
-            ringReplicationFactor:   3,
-            expectedSetForRead:      []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"},
-            expectedSetForWrite:     []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"},
-            expectedSetForReporting: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"},
-        }
-        tests["should fail on 3 unhealthy instances and RF=3"] = testCase{
-            ringInstances: map[string]InstanceDesc{
-                "instance-1": {Addr: "127.0.0.1", State: ACTIVE, Timestamp: now.Unix(), Tokens: GenerateTokens(128, nil)},
-                "instance-2": {Addr: "127.0.0.2", State: ACTIVE, Timestamp: now.Add(-10 * time.Second).Unix(), Tokens: GenerateTokens(128, nil)},
-                "instance-3": {Addr: "127.0.0.3", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)},
-                "instance-4": {Addr: "127.0.0.4", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)},
-                "instance-5": {Addr: "127.0.0.5", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)},
-            },
-            ringHeartbeatTimeout:    time.Minute,
-            ringReplicationFactor:   3,
-            expectedErrForRead:      ErrTooManyUnhealthyInstances,
-            expectedErrForWrite:     ErrTooManyUnhealthyInstances,
-            expectedErrForReporting: ErrTooManyUnhealthyInstances,
-        }
+        },
     }
 
     for testName, testData := range tests {
@@ -1110,44 +1110,21 @@ func validateGetReplicationSetForOperation(t *testing.T, requireQuorum bool) {
                 KVClient:            &MockClient{},
             }
 
-            var set ReplicationSet
-            var err error
-
-            if requireQuorum {
-                set, err = ring.GetReplicationSetForOperation(Read)
-            } else {
-                set, _, err = ring.GetReplicationSetForOperationWithNoQuorum(Read)
-            }
+            set, err := ring.GetReplicationSetForOperation(Read)
             require.Equal(t, testData.expectedErrForRead, err)
             assert.ElementsMatch(t, testData.expectedSetForRead, set.GetAddresses())
 
-            if requireQuorum {
-                set, err = ring.GetReplicationSetForOperation(Write)
-            } else {
-                set, _, err = ring.GetReplicationSetForOperationWithNoQuorum(Write)
-            }
+            set, err = ring.GetReplicationSetForOperation(Write)
             require.Equal(t, testData.expectedErrForWrite, err)
             assert.ElementsMatch(t, testData.expectedSetForWrite, set.GetAddresses())
 
-            if requireQuorum {
-                set, err = ring.GetReplicationSetForOperation(Reporting)
-            } else {
-                set, _, err = ring.GetReplicationSetForOperationWithNoQuorum(Reporting)
-            }
+            set, err = ring.GetReplicationSetForOperation(Reporting)
             require.Equal(t, testData.expectedErrForReporting, err)
             assert.ElementsMatch(t, testData.expectedSetForReporting, set.GetAddresses())
         })
     }
 }
 
-func TestRing_GetReplicationSetForOperation(t *testing.T) {
-    validateGetReplicationSetForOperation(t, true)
-}
-
-func TestRing_GetReplicationSetForOperationWithNoQuorum(t *testing.T) {
-    validateGetReplicationSetForOperation(t, false)
-}
-
 func TestRing_GetReplicationSetForOperation_WithZoneAwarenessEnabled(t *testing.T) {
     g := NewRandomTokenGenerator()
     tests := map[string]struct {

pkg/ring/util_test.go

Lines changed: 4 additions & 4 deletions
@@ -36,14 +36,14 @@ func (r *RingMock) GetInstanceDescsForOperation(op Operation) (map[string]Instan
     return args.Get(0).(map[string]InstanceDesc), args.Error(1)
 }
 
-func (r *RingMock) GetReplicationSetForOperation(op Operation) (ReplicationSet, error) {
+func (r *RingMock) GetAllInstanceDescs(op Operation) ([]InstanceDesc, []InstanceDesc, error) {
     args := r.Called(op)
-    return args.Get(0).(ReplicationSet), args.Error(1)
+    return args.Get(0).([]InstanceDesc), make([]InstanceDesc, 0), args.Error(1)
 }
 
-func (r *RingMock) GetReplicationSetForOperationWithNoQuorum(op Operation) (ReplicationSet, map[string]struct{}, error) {
+func (r *RingMock) GetReplicationSetForOperation(op Operation) (ReplicationSet, error) {
     args := r.Called(op)
-    return args.Get(0).(ReplicationSet), make(map[string]struct{}), args.Error(1)
+    return args.Get(0).(ReplicationSet), args.Error(1)
 }
 
 func (r *RingMock) ReplicationFactor() int {
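
Since the mock follows testify's r.Called(op)/args pattern, a test that needs the new method can stub it the usual way. A sketch, assuming the embedded mock.Mock that the Called/Get calls above imply; the expected instances are made up for illustration:

package ring

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Sketch of how a test might program the updated RingMock.
func TestExample_RingMockGetAllInstanceDescs(t *testing.T) {
	m := &RingMock{}
	healthy := []InstanceDesc{{Addr: "127.0.0.1", State: ACTIVE}}

	// The mock returns args.Get(0) as the healthy slice and always an empty
	// unhealthy slice, so only the first return value needs stubbing.
	m.On("GetAllInstanceDescs", Read).Return(healthy, nil)

	gotHealthy, gotUnhealthy, err := m.GetAllInstanceDescs(Read)
	require.NoError(t, err)
	require.Equal(t, healthy, gotHealthy)
	require.Empty(t, gotUnhealthy)
}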

pkg/ruler/rule_backup_manager.go

Lines changed: 1 addition & 1 deletion
@@ -53,7 +53,7 @@ func (r *rulesBackupManager) getRuleGroups(userID string) rulespb.RuleGroupList
     return result
 }
 
-// getRuleGroups updates the ruler_backup_rule_group metric by adding new groups that were backed up and removing
+// updateMetrics updates the ruler_backup_rule_group metric by adding new groups that were backed up and removing
 // those that are removed from the backup.
 func (r *rulesBackupManager) updateMetrics(newBackupGroups map[string]rulespb.RuleGroupList) {
     for user, groups := range newBackupGroups {

pkg/ruler/ruler.go

Lines changed: 2 additions & 2 deletions
@@ -198,7 +198,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
     f.DurationVar(&cfg.FlushCheckPeriod, "ruler.flush-period", 1*time.Minute, "Period with which to attempt to flush rule groups.")
     f.StringVar(&cfg.RulePath, "ruler.rule-path", "/rules", "file path to store temporary rule files for the prometheus rule managers")
     f.BoolVar(&cfg.EnableAPI, "experimental.ruler.enable-api", false, "Enable the ruler api")
-    f.BoolVar(&cfg.APIEnableRulesBackup, "experimental.ruler.api-enable-rules-backup", false, "EXPERIMENTAL: Enable rulers to store a copy of rules owned by other rulers with default state (state before any evaluation) and send this copy in list API requests as backup in case the ruler who owns the rule fails to send its rules. This allows the rules API to handle ruler outage by returning rules with default state. Ring replication-factor needs to be set to 3 or more for this to be useful.")
+    f.BoolVar(&cfg.APIEnableRulesBackup, "experimental.ruler.api-enable-rules-backup", false, "EXPERIMENTAL: Enable rulers to store a copy of rules owned by other rulers with default state (state before any evaluation) and send this copy in list API requests as backup in case the ruler who owns the rule fails to send its rules. This allows the rules API to handle ruler outage by returning rules with default state. Ring replication-factor needs to be set to 2 or more for this to be useful.")
     f.BoolVar(&cfg.APIDeduplicateRules, "experimental.ruler.api-deduplicate-rules", false, "EXPERIMENTAL: Remove duplicate rules in the prometheus rules and alerts API response. If there are duplicate rules the rule with the latest evaluation timestamp will be kept.")
     f.DurationVar(&cfg.OutageTolerance, "ruler.for-outage-tolerance", time.Hour, `Max time to tolerate outage for restoring "for" state of alert.`)
     f.DurationVar(&cfg.ForGracePeriod, "ruler.for-grace-period", 10*time.Minute, `Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period.`)
@@ -1102,7 +1102,7 @@ func (r *Ruler) getShardedRules(ctx context.Context, userID string, rulesRequest
         ring = r.ring.ShuffleShard(userID, shardSize)
     }
 
-    rulers, failedZones, err := ring.GetReplicationSetForOperationWithNoQuorum(ListRuleRingOp)
+    rulers, failedZones, err := GetReplicationSetForListRule(ring, &r.cfg.Ring)
     if err != nil {
        return nil, err
     }