Remove deprecated ingester metrics #5472

Merged 1 commit on Jul 24, 2023
pkg/ingester/metrics.go (0 additions, 94 deletions)
@@ -18,7 +18,6 @@ const (
)

type ingesterMetrics struct {
flushQueueLength prometheus.Gauge
ingestedSamples prometheus.Counter
ingestedExemplars prometheus.Counter
ingestedMetadata prometheus.Counter
@@ -37,25 +36,6 @@ type ingesterMetrics struct {
memMetadataCreatedTotal *prometheus.CounterVec
memSeriesRemovedTotal *prometheus.CounterVec
memMetadataRemovedTotal *prometheus.CounterVec
createdChunks prometheus.Counter
walReplayDuration prometheus.Gauge
walCorruptionsTotal prometheus.Counter

// Chunks transfer.
sentChunks prometheus.Counter
receivedChunks prometheus.Counter

// Chunks flushing.
flushSeriesInProgress prometheus.Gauge
chunkUtilization prometheus.Histogram
chunkLength prometheus.Histogram
chunkSize prometheus.Histogram
chunkAge prometheus.Histogram
memoryChunks prometheus.Gauge
seriesEnqueuedForFlush *prometheus.CounterVec
seriesDequeuedOutcome *prometheus.CounterVec
droppedChunks prometheus.Counter
oldestUnflushedChunkTimestamp prometheus.Gauge

activeSeriesPerUser *prometheus.GaugeVec

@@ -76,10 +56,6 @@ func newIngesterMetrics(r prometheus.Registerer, createMetricsConflictingWithTSD
)

m := &ingesterMetrics{
flushQueueLength: promauto.With(r).NewGauge(prometheus.GaugeOpts{
Name: "cortex_ingester_flush_queue_length",
Help: "The total number of series pending in the flush queue.",
}),
ingestedSamples: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "cortex_ingester_ingested_samples_total",
Help: "The total number of samples ingested.",
@@ -144,18 +120,6 @@ func newIngesterMetrics(r prometheus.Registerer, createMetricsConflictingWithTSD
Name: "cortex_ingester_memory_users",
Help: "The current number of users in memory.",
}),
createdChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "cortex_ingester_chunks_created_total",
Help: "The total number of chunks the ingester has created.",
}),
walReplayDuration: promauto.With(r).NewGauge(prometheus.GaugeOpts{
Name: "cortex_ingester_wal_replay_duration_seconds",
Help: "Time taken to replay the checkpoint and the WAL.",
}),
walCorruptionsTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "cortex_ingester_wal_corruptions_total",
Help: "Total number of WAL corruptions encountered.",
}),
memMetadataCreatedTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Name: "cortex_ingester_memory_metadata_created_total",
Help: "The total number of metadata that were created per user",
@@ -165,64 +129,6 @@ func newIngesterMetrics(r prometheus.Registerer, createMetricsConflictingWithTSD
Help: "The total number of metadata that were removed per user.",
}, []string{"user"}),

// Chunks / blocks transfer.
sentChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "cortex_ingester_sent_chunks",
Help: "The total number of chunks sent by this ingester whilst leaving.",
}),
receivedChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "cortex_ingester_received_chunks",
Help: "The total number of chunks received by this ingester whilst joining",
}),

// Chunks flushing.
flushSeriesInProgress: promauto.With(r).NewGauge(prometheus.GaugeOpts{
Name: "cortex_ingester_flush_series_in_progress",
Help: "Number of flush series operations in progress.",
}),
chunkUtilization: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
Name: "cortex_ingester_chunk_utilization",
Help: "Distribution of stored chunk utilization (when stored).",
Buckets: prometheus.LinearBuckets(0, 0.2, 6),
}),
chunkLength: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
Name: "cortex_ingester_chunk_length",
Help: "Distribution of stored chunk lengths (when stored).",
Buckets: prometheus.ExponentialBuckets(5, 2, 11), // biggest bucket is 5*2^(11-1) = 5120
}),
chunkSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
Name: "cortex_ingester_chunk_size_bytes",
Help: "Distribution of stored chunk sizes (when stored).",
Buckets: prometheus.ExponentialBuckets(500, 2, 7), // biggest bucket is 500*2^(7-1) = 32000
}),
chunkAge: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
Name: "cortex_ingester_chunk_age_seconds",
Help: "Distribution of chunk ages (when stored).",
// with default settings chunks should flush between 5 min and 12 hours
// so buckets at 1min, 5min, 10min, 30min, 1hr, 2hr, 4hr, 10hr, 12hr, 16hr
Buckets: []float64{60, 300, 600, 1800, 3600, 7200, 14400, 36000, 43200, 57600},
}),
memoryChunks: promauto.With(r).NewGauge(prometheus.GaugeOpts{
Name: "cortex_ingester_memory_chunks",
Help: "The total number of chunks in memory.",
}),
seriesEnqueuedForFlush: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Name: "cortex_ingester_flushing_enqueued_series_total",
Help: "Total number of series enqueued for flushing, with reasons.",
}, []string{"reason"}),
seriesDequeuedOutcome: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Name: "cortex_ingester_flushing_dequeued_series_total",
Help: "Total number of series dequeued for flushing, with outcome (superset of enqueue reasons)",
}, []string{"outcome"}),
droppedChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "cortex_ingester_dropped_chunks_total",
Help: "Total number of chunks dropped from flushing because they have too few samples.",
}),
oldestUnflushedChunkTimestamp: promauto.With(r).NewGauge(prometheus.GaugeOpts{
Name: "cortex_oldest_unflushed_chunk_timestamp_seconds",
Help: "Unix timestamp of the oldest unflushed chunk in the memory",
}),

maxUsersGauge: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{
Name: instanceLimits,
Help: instanceLimitsHelp,
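
For anyone who wants to confirm nothing in their setup still depends on the deleted series, here is a minimal sketch, not part of this PR: it assumes access to a prometheus.Gatherer (the default gatherer is used below for illustration; in Cortex this would be the ingester's registry), and the reportRemovedMetrics helper and hard-coded name list are purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// removedIngesterMetrics lists the metric names deleted by this PR.
var removedIngesterMetrics = []string{
	"cortex_ingester_flush_queue_length",
	"cortex_ingester_chunks_created_total",
	"cortex_ingester_wal_replay_duration_seconds",
	"cortex_ingester_wal_corruptions_total",
	"cortex_ingester_sent_chunks",
	"cortex_ingester_received_chunks",
	"cortex_ingester_flush_series_in_progress",
	"cortex_ingester_chunk_utilization",
	"cortex_ingester_chunk_length",
	"cortex_ingester_chunk_size_bytes",
	"cortex_ingester_chunk_age_seconds",
	"cortex_ingester_memory_chunks",
	"cortex_ingester_flushing_enqueued_series_total",
	"cortex_ingester_flushing_dequeued_series_total",
	"cortex_ingester_dropped_chunks_total",
	"cortex_oldest_unflushed_chunk_timestamp_seconds",
}

// reportRemovedMetrics gathers all metric families from g and prints any
// deprecated names that are still registered.
func reportRemovedMetrics(g prometheus.Gatherer) error {
	families, err := g.Gather()
	if err != nil {
		return err
	}
	removed := make(map[string]bool, len(removedIngesterMetrics))
	for _, name := range removedIngesterMetrics {
		removed[name] = true
	}
	for _, mf := range families {
		if removed[mf.GetName()] {
			fmt.Printf("still registered: %s\n", mf.GetName())
		}
	}
	return nil
}

func main() {
	// Illustrative only: check the default registry.
	if err := reportRemovedMetrics(prometheus.DefaultGatherer); err != nil {
		fmt.Println("gather failed:", err)
	}
}
```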