From 84eabc6540f621cd3bb96c8627c8094ca761c86a Mon Sep 17 00:00:00 2001 From: SungJin1212 Date: Mon, 10 Mar 2025 11:02:12 +0900 Subject: [PATCH 1/6] Support remote write v2 by converting request Signed-off-by: SungJin1212 --- .github/workflows/test-build-deploy.yml | 1 + CHANGELOG.md | 1 + docs/configuration/config-file-reference.md | 5 + docs/configuration/v1-guarantees.md | 1 + integration/e2e/util.go | 76 +++++ integration/e2ecortex/client.go | 34 ++ integration/remote_write_v2_test.go | 327 ++++++++++++++++++++ pkg/api/api.go | 8 +- pkg/cortexpb/cortex.proto | 8 + pkg/cortexpb/histograms.go | 34 ++ pkg/distributor/distributor.go | 30 +- pkg/distributor/write_stats.go | 62 ++++ pkg/distributor/write_stats_test.go | 41 +++ pkg/ingester/ingester.go | 8 +- pkg/util/push/push.go | 266 +++++++++++++++- pkg/util/push/push_test.go | 314 +++++++++++++++++-- 16 files changed, 1169 insertions(+), 47 deletions(-) create mode 100644 integration/remote_write_v2_test.go create mode 100644 pkg/distributor/write_stats.go create mode 100644 pkg/distributor/write_stats_test.go diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index a15cd009776..48005fa6d43 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -162,6 +162,7 @@ jobs: - integration_querier - integration_ruler - integration_query_fuzz + - integration_remote_write_v2 steps: - name: Upgrade golang uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index cfb1ec43241..946304faf3e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ * [FEATURE] Querier/Ruler: Add `query_partial_data` and `rules_partial_data` limits to allow queries/rules to be evaluated with data from a single zone, if other zones are not available. #6526 * [FEATURE] Update prometheus alertmanager version to v0.28.0 and add new integration msteamsv2, jira, and rocketchat. 
#6590 * [FEATURE] Ingester/StoreGateway: Add `ResourceMonitor` module in Cortex, and add `ResourceBasedLimiter` in Ingesters and StoreGateways. #6674 +* [FEATURE] Support Prometheus remote write 2.0. #6330 * [FEATURE] Ingester: Support out-of-order native histogram ingestion. It automatically enabled when `-ingester.out-of-order-time-window > 0` and `-blocks-storage.tsdb.enable-native-histograms=true`. #6626 #6663 * [FEATURE] Ruler: Add support for percentage based sharding for rulers. #6680 * [FEATURE] Ruler: Add support for group labels. #6665 diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index acd03b3de78..9c3a5d26055 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -2889,6 +2889,11 @@ ha_tracker: # CLI flag: -distributor.sign-write-requests [sign_write_requests: | default = false] +# EXPERIMENTAL: If true, accept prometheus remote write v2 protocol push +# request. +# CLI flag: -distributor.remote-write2-enabled +[remote_write2_enabled: | default = false] + # EXPERIMENTAL: If enabled, distributor would use stream connection to send # requests to ingesters. # CLI flag: -distributor.use-stream-push diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md index 700fbf5beb7..25b7e404225 100644 --- a/docs/configuration/v1-guarantees.md +++ b/docs/configuration/v1-guarantees.md @@ -59,6 +59,7 @@ Currently experimental features are: - Distributor: - Do not extend writes on unhealthy ingesters (`-distributor.extend-writes=false`) - Accept multiple HA pairs in the same request (enabled via `-experimental.distributor.ha-tracker.mixed-ha-samples=true`) + - Accept Prometheus remote write 2.0 request (`-distributor.remote-write2-enabled=true`) - Tenant Deletion in Purger, for blocks storage. 
- Query-frontend: query stats tracking (`-frontend.query-stats-enabled`) - Blocks storage bucket index diff --git a/integration/e2e/util.go b/integration/e2e/util.go index dd10efa1ba0..95f71bc0d35 100644 --- a/integration/e2e/util.go +++ b/integration/e2e/util.go @@ -19,6 +19,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/tsdbutil" @@ -423,3 +424,78 @@ func CreateBlock( return id, nil } + +func GenerateHistogramSeriesV2(name string, ts time.Time, i uint32, floatHistogram bool, additionalLabels ...prompb.Label) (symbols []string, series []writev2.TimeSeries) { + tsMillis := TimeToMilliseconds(ts) + + st := writev2.NewSymbolTable() + + lbs := labels.Labels{labels.Label{Name: "__name__", Value: name}} + for _, lbl := range additionalLabels { + lbs = append(lbs, labels.Label{Name: lbl.Name, Value: lbl.Value}) + } + + var ( + h *histogram.Histogram + fh *histogram.FloatHistogram + ph writev2.Histogram + ) + if floatHistogram { + fh = tsdbutil.GenerateTestFloatHistogram(int64(i)) + ph = writev2.FromFloatHistogram(tsMillis, fh) + } else { + h = tsdbutil.GenerateTestHistogram(int64(i)) + ph = writev2.FromIntHistogram(tsMillis, h) + } + + // Generate the series + series = append(series, writev2.TimeSeries{ + LabelsRefs: st.SymbolizeLabels(lbs, nil), + Histograms: []writev2.Histogram{ph}, + }) + + symbols = st.Symbols() + + return +} + +func GenerateSeriesV2(name string, ts time.Time, additionalLabels ...prompb.Label) (symbols []string, series []writev2.TimeSeries, vector model.Vector) { + tsMillis := TimeToMilliseconds(ts) + value := rand.Float64() + + st := writev2.NewSymbolTable() + lbs := labels.Labels{{Name: labels.MetricName, Value: 
name}} + + for _, label := range additionalLabels { + lbs = append(lbs, labels.Label{ + Name: label.Name, + Value: label.Value, + }) + } + series = append(series, writev2.TimeSeries{ + // Generate the series + LabelsRefs: st.SymbolizeLabels(lbs, nil), + Samples: []writev2.Sample{ + {Value: value, Timestamp: tsMillis}, + }, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_GAUGE, + }, + }) + symbols = st.Symbols() + + // Generate the expected vector when querying it + metric := model.Metric{} + metric[labels.MetricName] = model.LabelValue(name) + for _, lbl := range additionalLabels { + metric[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value) + } + + vector = append(vector, &model.Sample{ + Metric: metric, + Value: model.SampleValue(value), + Timestamp: model.Time(tsMillis), + }) + + return +} diff --git a/integration/e2ecortex/client.go b/integration/e2ecortex/client.go index 9067b60c078..a6d7184dab3 100644 --- a/integration/e2ecortex/client.go +++ b/integration/e2ecortex/client.go @@ -24,6 +24,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" yaml "gopkg.in/yaml.v3" @@ -147,6 +148,39 @@ func (c *Client) Push(timeseries []prompb.TimeSeries, metadata ...prompb.MetricM return res, nil } +// PushV2 the input timeseries to the remote endpoint +func (c *Client) PushV2(symbols []string, timeseries []writev2.TimeSeries) (*http.Response, error) { + // Create write request + data, err := proto.Marshal(&writev2.Request{Symbols: symbols, Timeseries: timeseries}) + if err != nil { + return nil, err + } + + // Create HTTP request + compressed := snappy.Encode(nil, data) + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/api/prom/push", 
c.distributorAddress), bytes.NewReader(compressed)) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Encoding", "snappy") + req.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request") + req.Header.Set("X-Prometheus-Remote-Write-Version", "2.0.0") + req.Header.Set("X-Scope-OrgID", c.orgID) + + ctx, cancel := context.WithTimeout(context.Background(), c.timeout) + defer cancel() + + // Execute HTTP request + res, err := c.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + + defer res.Body.Close() + return res, nil +} + func getNameAndAttributes(ts prompb.TimeSeries) (string, map[string]any) { var metricName string attributes := make(map[string]any) diff --git a/integration/remote_write_v2_test.go b/integration/remote_write_v2_test.go new file mode 100644 index 00000000000..88ca384fb04 --- /dev/null +++ b/integration/remote_write_v2_test.go @@ -0,0 +1,327 @@ +//go:build integration_remote_write_v2 +// +build integration_remote_write_v2 + +package integration + +import ( + "math/rand" + "net/http" + "path" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/integration/e2e" + e2edb "github.com/cortexproject/cortex/integration/e2e/db" + "github.com/cortexproject/cortex/integration/e2ecortex" + "github.com/cortexproject/cortex/pkg/storage/tsdb" +) + +func TestIngesterRollingUpdate(t *testing.T) { + // Test ingester rolling update situation: when -distributor.remote-write2-enabled is true, and ingester uses the v1.19.0 image. + // Expected: remote write 2.0 push success, but response header values are set to "0". 
+ const blockRangePeriod = 5 * time.Second + ingesterImage := "quay.io/cortexproject/cortex:v1.19.0" + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + // Distributor. + "-distributor.replication-factor": "1", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + distributorFlag := mergeFlags(flags, map[string]string{ + "-distributor.remote-write2-enabled": "true", + }) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + // Start Cortex replicas. + // Start all other services. 
+ ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, ingesterImage) + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), distributorFlag, "") + storeGateway := e2ecortex.NewStoreGateway("store-gateway", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), mergeFlags(flags, map[string]string{ + "-querier.store-gateway-addresses": storeGateway.NetworkGRPCEndpoint()}), "") + + require.NoError(t, s.StartAndWaitReady(querier, ingester, distributor, storeGateway)) + + // Wait until Cortex replicas have updated the ring state. + require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), querier.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + symbols1, series, expectedVector := e2e.GenerateSeriesV2("test_series", now, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "foo", Value: "bar"}) + res, err := c.PushV2(symbols1, series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "0", "0") + + // sample + result, err := c.Query("test_series", now) + require.NoError(t, err) + assert.Equal(t, expectedVector, result.(model.Vector)) + + // metadata + metadata, err := c.Metadata("test_series", "") + require.NoError(t, err) + require.Equal(t, 1, len(metadata["test_series"])) + + // histogram + histogramIdx := rand.Uint32() + symbols2, histogramSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "false"}) + res, err = c.PushV2(symbols2, 
histogramSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "0", "0") + + symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"}) + res, err = c.PushV2(symbols3, histogramFloatSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "0", "0") + + testHistogramTimestamp := now.Add(blockRangePeriod * 2) + expectedHistogram := tsdbutil.GenerateTestHistogram(int64(histogramIdx)) + result, err = c.Query(`test_histogram`, testHistogramTimestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + v := result.(model.Vector) + require.Equal(t, 2, v.Len()) + for _, s := range v { + require.NotNil(t, s.Histogram) + require.Equal(t, float64(expectedHistogram.Count), float64(s.Histogram.Count)) + require.Equal(t, float64(expectedHistogram.Sum), float64(s.Histogram.Sum)) + } +} + +func TestIngest(t *testing.T) { + const blockRangePeriod = 5 * time.Second + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. 
+ consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + // Distributor. + "-distributor.replication-factor": "1", + "-distributor.remote-write2-enabled": "true", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + // Start Cortex replicas. + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + + // Wait until Cortex replicas have updated the ring state. 
+ require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + symbols1, series, expectedVector := e2e.GenerateSeriesV2("test_series", now, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "foo", Value: "bar"}) + res, err := c.PushV2(symbols1, series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "1", "0", "0") + + // sample + result, err := c.Query("test_series", now) + require.NoError(t, err) + assert.Equal(t, expectedVector, result.(model.Vector)) + + // metadata + metadata, err := c.Metadata("test_series", "") + require.NoError(t, err) + require.Equal(t, 1, len(metadata["test_series"])) + + // histogram + histogramIdx := rand.Uint32() + symbols2, histogramSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "false"}) + res, err = c.PushV2(symbols2, histogramSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "1", "0") + + symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"}) + res, err = c.PushV2(symbols3, histogramFloatSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "1", "0") + + testHistogramTimestamp := now.Add(blockRangePeriod * 2) + expectedHistogram := tsdbutil.GenerateTestHistogram(int64(histogramIdx)) + result, err = c.Query(`test_histogram`, testHistogramTimestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + v := result.(model.Vector) + require.Equal(t, 2, v.Len()) + for _, s := range v { + 
require.NotNil(t, s.Histogram) + require.Equal(t, float64(expectedHistogram.Count), float64(s.Histogram.Count)) + require.Equal(t, float64(expectedHistogram.Sum), float64(s.Histogram.Sum)) + } +} + +func TestExemplar(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + "-ingester.max-exemplars": "100", + // Distributor. + "-distributor.replication-factor": "1", + "-distributor.remote-write2-enabled": "true", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + // Start Cortex replicas. + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + + // Wait until Cortex replicas have updated the ring state. 
+ require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + tsMillis := e2e.TimeToMilliseconds(now) + + symbols := []string{"", "__name__", "test_metric", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"} + timeseries := []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Symbolized writeRequestFixture.Timeseries[0].Labels + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, // writeV2RequestSeries1Metadata.Type. + + HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help. + UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit. + }, + Samples: []writev2.Sample{{Value: 1, Timestamp: tsMillis}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: tsMillis}}, + }, + } + + res, err := c.PushV2(symbols, timeseries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "1", "0", "1") + + start := time.Now().Add(-time.Minute) + end := now.Add(time.Minute) + + exemplars, err := c.QueryExemplars("test_metric", start, end) + require.NoError(t, err) + require.Equal(t, 1, len(exemplars)) +} + +func testPushHeader(t *testing.T, header http.Header, expectedSamples, expectedHistogram, expectedExemplars string) { + require.Equal(t, expectedSamples, header.Get("X-Prometheus-Remote-Write-Samples-Written")) + require.Equal(t, expectedHistogram, header.Get("X-Prometheus-Remote-Write-Histograms-Written")) + require.Equal(t, expectedExemplars, header.Get("X-Prometheus-Remote-Write-Exemplars-Written")) +} diff --git a/pkg/api/api.go b/pkg/api/api.go index 1c68c426d8b..f6e6c2b1868 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -283,7 +283,7 @@ 
func (a *API) RegisterRuntimeConfig(runtimeConfigHandler http.HandlerFunc) { func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distributor.Config, overrides *validation.Overrides) { distributorpb.RegisterDistributorServer(a.server.GRPC, d) - a.RegisterRoute("/api/v1/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") + a.RegisterRoute("/api/v1/push", push.Handler(pushConfig.RemoteWrite2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") a.RegisterRoute("/api/v1/otlp/v1/metrics", push.OTLPHandler(pushConfig.OTLPMaxRecvMsgSize, overrides, pushConfig.OTLPConfig, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") a.indexPage.AddLink(SectionAdminEndpoints, "/distributor/ring", "Distributor Ring Status") @@ -295,7 +295,7 @@ func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distrib a.RegisterRoute("/distributor/ha_tracker", d.HATracker, false, "GET") // Legacy Routes - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/push"), push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") + a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/push"), push.Handler(pushConfig.RemoteWrite2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") a.RegisterRoute("/all_user_stats", http.HandlerFunc(d.AllUserStatsHandler), false, "GET") a.RegisterRoute("/ha-tracker", d.HATracker, false, "GET") } @@ -328,12 +328,12 @@ func (a *API) RegisterIngester(i Ingester, pushConfig distributor.Config) { a.RegisterRoute("/ingester/renewTokens", http.HandlerFunc(i.RenewTokenHandler), false, "GET", "POST") a.RegisterRoute("/ingester/all_user_stats", http.HandlerFunc(i.AllUserStatsHandler), false, "GET") a.RegisterRoute("/ingester/mode", http.HandlerFunc(i.ModeHandler), false, "GET", "POST") - a.RegisterRoute("/ingester/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, 
i.Push), true, "POST") // For testing and debugging. + a.RegisterRoute("/ingester/push", push.Handler(pushConfig.RemoteWrite2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. // Legacy Routes a.RegisterRoute("/flush", http.HandlerFunc(i.FlushHandler), false, "GET", "POST") a.RegisterRoute("/shutdown", http.HandlerFunc(i.ShutdownHandler), false, "GET", "POST") - a.RegisterRoute("/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. + a.RegisterRoute("/push", push.Handler(pushConfig.RemoteWrite2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. } func (a *API) RegisterTenantDeletion(api *purger.TenantDeletionAPI) { diff --git a/pkg/cortexpb/cortex.proto b/pkg/cortexpb/cortex.proto index f2995afbf22..e40b04439aa 100644 --- a/pkg/cortexpb/cortex.proto +++ b/pkg/cortexpb/cortex.proto @@ -37,6 +37,14 @@ message WriteResponse { int32 code = 1; string message = 2; } +message WriteResponse { + // Samples represents X-Prometheus-Remote-Write-Samples-Written + int64 Samples = 1; + // Histograms represents X-Prometheus-Remote-Write-Histograms-Written + int64 Histograms = 2; + // Exemplars represents X-Prometheus-Remote-Write-Exemplars-Written + int64 Exemplars = 3; +} message TimeSeries { repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"]; diff --git a/pkg/cortexpb/histograms.go b/pkg/cortexpb/histograms.go index 60e7207a19a..d05dbaa7727 100644 --- a/pkg/cortexpb/histograms.go +++ b/pkg/cortexpb/histograms.go @@ -16,6 +16,7 @@ package cortexpb import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" ) func (h Histogram) IsFloatHistogram() bool { @@ -23,6 +24,30 @@ func (h Histogram) IsFloatHistogram() bool { return ok } +func 
HistogramWriteV2ProtoToHistogramProto(h writev2.Histogram) Histogram { + ph := Histogram{ + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + NegativeSpans: spansWriteV2ProtoToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeDeltas, + NegativeCounts: h.NegativeCounts, + PositiveSpans: spansWriteV2ProtoToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveDeltas, + PositiveCounts: h.PositiveCounts, + ResetHint: Histogram_ResetHint(h.ResetHint), + TimestampMs: h.Timestamp, + } + if h.IsFloatHistogram() { + ph.Count = &Histogram_CountFloat{CountFloat: h.GetCountFloat()} + ph.ZeroCount = &Histogram_ZeroCountFloat{ZeroCountFloat: h.GetZeroCountFloat()} + } else { + ph.Count = &Histogram_CountInt{CountInt: h.GetCountInt()} + ph.ZeroCount = &Histogram_ZeroCountInt{ZeroCountInt: h.GetZeroCountInt()} + } + return ph +} + // HistogramPromProtoToHistogramProto converts a prometheus protobuf Histogram to cortex protobuf Histogram. func HistogramPromProtoToHistogramProto(h prompb.Histogram) Histogram { ph := Histogram{ @@ -155,3 +180,12 @@ func spansPromProtoToSpansProto(s []prompb.BucketSpan) []BucketSpan { return spans } + +func spansWriteV2ProtoToSpansProto(s []writev2.BucketSpan) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 931bdbf98bd..9e8cd833d2d 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -154,6 +154,7 @@ type Config struct { ExtendWrites bool `yaml:"extend_writes"` SignWriteRequestsEnabled bool `yaml:"sign_write_requests"` UseStreamPush bool `yaml:"use_stream_push"` + RemoteWrite2Enabled bool `yaml:"remote_write2_enabled"` // Distributors ring DistributorRing RingConfig `yaml:"ring"` @@ -213,6 +214,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.ExtendWrites, 
"distributor.extend-writes", true, "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.") f.BoolVar(&cfg.ZoneResultsQuorumMetadata, "distributor.zone-results-quorum-metadata", false, "Experimental, this flag may change in the future. If zone awareness and this both enabled, when querying metadata APIs (labels names and values for now), only results from quorum number of zones will be included.") f.IntVar(&cfg.NumPushWorkers, "distributor.num-push-workers", 0, "EXPERIMENTAL: Number of go routines to handle push calls from distributors to ingesters. When no workers are available, a new goroutine will be spawned automatically. If set to 0 (default), workers are disabled, and a new goroutine will be created for each push request.") + f.BoolVar(&cfg.RemoteWrite2Enabled, "distributor.remote-write2-enabled", false, "EXPERIMENTAL: If true, accept prometheus remote write v2 protocol push request.") f.Float64Var(&cfg.InstanceLimits.MaxIngestionRate, "distributor.instance-limits.max-ingestion-rate", 0, "Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.") f.IntVar(&cfg.InstanceLimits.MaxInflightPushRequests, "distributor.instance-limits.max-inflight-push-requests", 0, "Max inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited.") @@ -816,12 +818,21 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co keys := append(seriesKeys, metadataKeys...) 
initialMetadataIndex := len(seriesKeys) - err = d.doBatch(ctx, req, subRing, keys, initialMetadataIndex, validatedMetadata, validatedTimeseries, userID) + ws := WriteStats{} + + err = d.doBatch(ctx, req, subRing, keys, initialMetadataIndex, validatedMetadata, validatedTimeseries, userID, &ws) if err != nil { return nil, err } - return &cortexpb.WriteResponse{}, firstPartialErr + resp := &cortexpb.WriteResponse{} + if d.cfg.RemoteWrite2Enabled { + resp.Samples = ws.LoadSamples() + resp.Histograms = ws.LoadHistogram() + resp.Exemplars = ws.LoadExemplars() + } + + return resp, firstPartialErr } func (d *Distributor) updateLabelSetMetrics() { @@ -883,7 +894,7 @@ func (d *Distributor) cleanStaleIngesterMetrics() { } } -func (d *Distributor) doBatch(ctx context.Context, req *cortexpb.WriteRequest, subRing ring.ReadRing, keys []uint32, initialMetadataIndex int, validatedMetadata []*cortexpb.MetricMetadata, validatedTimeseries []cortexpb.PreallocTimeseries, userID string) error { +func (d *Distributor) doBatch(ctx context.Context, req *cortexpb.WriteRequest, subRing ring.ReadRing, keys []uint32, initialMetadataIndex int, validatedMetadata []*cortexpb.MetricMetadata, validatedTimeseries []cortexpb.PreallocTimeseries, userID string, ws *WriteStats) error { span, _ := opentracing.StartSpanFromContext(ctx, "doBatch") defer span.Finish() @@ -918,7 +929,7 @@ func (d *Distributor) doBatch(ctx context.Context, req *cortexpb.WriteRequest, s } } - return d.send(localCtx, ingester, timeseries, metadata, req.Source) + return d.send(localCtx, ingester, timeseries, metadata, req.Source, ws) }, func() { cortexpb.ReuseSlice(req.Timeseries) req.Free() @@ -1152,7 +1163,7 @@ func sortLabelsIfNeeded(labels []cortexpb.LabelAdapter) { }) } -func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries []cortexpb.PreallocTimeseries, metadata []*cortexpb.MetricMetadata, source cortexpb.WriteRequest_SourceEnum) error { +func (d *Distributor) send(ctx context.Context, 
ingester ring.InstanceDesc, timeseries []cortexpb.PreallocTimeseries, metadata []*cortexpb.MetricMetadata, source cortexpb.WriteRequest_SourceEnum, ws *WriteStats) error { h, err := d.ingesterPool.GetClientFor(ingester.Addr) if err != nil { return err @@ -1181,7 +1192,7 @@ func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, time req.Metadata = metadata req.Source = source - _, err = c.PushPreAlloc(ctx, req) + resp, err = c.PushPreAlloc(ctx, req) // We should not reuse the req in case of errors: // See: https://github.com/grpc/grpc-go/issues/6355 @@ -1203,6 +1214,13 @@ func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, time } } + if resp != nil { + // track write stats + ws.SetSamples(resp.Samples) + ws.SetHistograms(resp.Histograms) + ws.SetExemplars(resp.Exemplars) + } + return err } diff --git a/pkg/distributor/write_stats.go b/pkg/distributor/write_stats.go new file mode 100644 index 00000000000..0f7fbc332d0 --- /dev/null +++ b/pkg/distributor/write_stats.go @@ -0,0 +1,62 @@ +package distributor + +import ( + "go.uber.org/atomic" +) + +type WriteStats struct { + // Samples represents X-Prometheus-Remote-Write-Written-Samples + Samples atomic.Int64 + // Histograms represents X-Prometheus-Remote-Write-Written-Histograms + Histograms atomic.Int64 + // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars + Exemplars atomic.Int64 +} + +func (w *WriteStats) SetSamples(samples int64) { + if w == nil { + return + } + + w.Samples.Store(samples) +} + +func (w *WriteStats) SetHistograms(histograms int64) { + if w == nil { + return + } + + w.Histograms.Store(histograms) +} + +func (w *WriteStats) SetExemplars(exemplars int64) { + if w == nil { + return + } + + w.Exemplars.Store(exemplars) +} + +func (w *WriteStats) LoadSamples() int64 { + if w == nil { + return 0 + } + + return w.Samples.Load() +} + +func (w *WriteStats) LoadHistogram() int64 { + if w == nil { + return 0 + } + + return w.Histograms.Load() +} 
+ +func (w *WriteStats) LoadExemplars() int64 { + if w == nil { + return 0 + } + + return w.Exemplars.Load() +} diff --git a/pkg/distributor/write_stats_test.go b/pkg/distributor/write_stats_test.go new file mode 100644 index 00000000000..523f16788fe --- /dev/null +++ b/pkg/distributor/write_stats_test.go @@ -0,0 +1,41 @@ +package distributor + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_SetAndLoad(t *testing.T) { + ws := &WriteStats{} + + t.Run("Samples", func(t *testing.T) { + ws.SetSamples(3) + assert.Equal(t, int64(3), ws.LoadSamples()) + }) + t.Run("Histograms", func(t *testing.T) { + ws.SetHistograms(10) + assert.Equal(t, int64(10), ws.LoadHistogram()) + }) + t.Run("Exemplars", func(t *testing.T) { + ws.SetExemplars(2) + assert.Equal(t, int64(2), ws.LoadExemplars()) + }) +} + +func Test_NilReceiver(t *testing.T) { + var ws *WriteStats + + t.Run("Samples", func(t *testing.T) { + ws.SetSamples(3) + assert.Equal(t, int64(0), ws.LoadSamples()) + }) + t.Run("Histograms", func(t *testing.T) { + ws.SetHistograms(10) + assert.Equal(t, int64(0), ws.LoadHistogram()) + }) + t.Run("Exemplars", func(t *testing.T) { + ws.SetExemplars(2) + assert.Equal(t, int64(0), ws.LoadExemplars()) + }) +} diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index c2dab4a54ec..2ca25e9b6d3 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -1569,7 +1569,13 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte return &cortexpb.WriteResponse{}, httpgrpc.Errorf(code, "%s", wrapWithUser(firstPartialErr, userID).Error()) } - return &cortexpb.WriteResponse{}, nil + writeResponse := &cortexpb.WriteResponse{ + Samples: int64(succeededSamplesCount), + Histograms: int64(succeededHistogramsCount), + Exemplars: int64(succeededExemplarsCount), + } + + return writeResponse, nil } func (i *Ingester) PushStream(srv client.Ingester_PushStreamServer) error { diff --git a/pkg/util/push/push.go 
b/pkg/util/push/push.go index 9cabb395228..730b3d46193 100644 --- a/pkg/util/push/push.go +++ b/pkg/util/push/push.go @@ -2,22 +2,45 @@ package push import ( "context" + "fmt" "net/http" + "strconv" + "strings" "github.com/go-kit/log/level" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/labels" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" + "github.com/prometheus/prometheus/storage/remote" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/middleware" "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/log" ) +const ( + remoteWriteVersionHeader = "X-Prometheus-Remote-Write-Version" + remoteWriteVersion1HeaderValue = "0.1.0" + remoteWriteVersion20HeaderValue = "2.0.0" + appProtoContentType = "application/x-protobuf" + appProtoV1ContentType = "application/x-protobuf;proto=prometheus.WriteRequest" + appProtoV2ContentType = "application/x-protobuf;proto=io.prometheus.write.v2.Request" + + rw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Samples-Written" + rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written" + rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Exemplars-Written" + + errMsgNotEnabledPRW2 = "Not enabled prometheus remote write v2 push request" +) + // Func defines the type of the push. It is similar to http.HandlerFunc. type Func func(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) // Handler is a http.Handler which accepts WriteRequests. 
-func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func) http.Handler { +func Handler(remoteWrite2Enabled bool, maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() logger := log.WithContext(ctx, log.Logger) @@ -28,31 +51,240 @@ func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push F logger = log.WithSourceIPs(source, logger) } } - var req cortexpb.PreallocWriteRequest - err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) + + // follow Prometheus https://github.com/prometheus/prometheus/blob/main/storage/remote/write_handler.go + contentType := r.Header.Get("Content-Type") + if contentType == "" { + contentType = appProtoContentType + } + + msgType, err := parseProtoMsg(contentType) if err != nil { - level.Error(logger).Log("err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) + level.Error(logger).Log("Error decoding remote write request", "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) return } - req.SkipLabelNameValidation = false - if req.Source == 0 { - req.Source = cortexpb.API + if msgType != config.RemoteWriteProtoMsgV1 && msgType != config.RemoteWriteProtoMsgV2 { + level.Error(logger).Log("Not accepted msg type", "msgType", msgType, "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) + return + } + + enc := r.Header.Get("Content-Encoding") + if enc == "" { + } else if enc != string(remote.SnappyBlockCompression) { + err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, remote.SnappyBlockCompression) + level.Error(logger).Log("Error decoding remote write request", "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) + return } - if _, err := push(ctx, &req.WriteRequest); err != nil { - resp, 
ok := httpgrpc.HTTPResponseFromError(err) - if !ok { - http.Error(w, err.Error(), http.StatusInternalServerError) + switch msgType { + case config.RemoteWriteProtoMsgV1: + var req cortexpb.PreallocWriteRequest + err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) return } - if resp.GetCode()/100 == 5 { - level.Error(logger).Log("msg", "push error", "err", err) - } else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests { - level.Warn(logger).Log("msg", "push refused", "err", err) + + req.SkipLabelNameValidation = false + if req.Source == 0 { + req.Source = cortexpb.API + } + + if _, err := push(ctx, &req.WriteRequest); err != nil { + resp, ok := httpgrpc.HTTPResponseFromError(err) + if !ok { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if resp.GetCode()/100 == 5 { + level.Error(logger).Log("msg", "push error", "err", err) + } else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests { + level.Warn(logger).Log("msg", "push refused", "err", err) + } + http.Error(w, string(resp.Body), int(resp.Code)) + } + case config.RemoteWriteProtoMsgV2: + if remoteWrite2Enabled { + var req writev2.Request + err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + v1Req, err := convertV2RequestToV1(&req) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + v1Req.SkipLabelNameValidation = false + // Current source is only API + if v1Req.Source == 0 { + v1Req.Source = cortexpb.API + } + + if resp, err := push(ctx, &v1Req.WriteRequest); err != nil { + resp, ok := 
httpgrpc.HTTPResponseFromError(err) + setHeader(w, 0, 0, 0) + if !ok { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if resp.GetCode()/100 == 5 { + level.Error(logger).Log("msg", "push error", "err", err) + } else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests { + level.Warn(logger).Log("msg", "push refused", "err", err) + } + http.Error(w, string(resp.Body), int(resp.Code)) + } else { + setHeader(w, resp.Samples, resp.Histograms, resp.Exemplars) + } + } else { + level.Error(logger).Log(errMsgNotEnabledPRW2) + http.Error(w, errMsgNotEnabledPRW2, http.StatusUnsupportedMediaType) + return } - http.Error(w, string(resp.Body), int(resp.Code)) } }) } + +func setHeader(w http.ResponseWriter, samples, histograms, exemplars int64) { + w.Header().Set(rw20WrittenSamplesHeader, strconv.FormatInt(samples, 10)) + w.Header().Set(rw20WrittenHistogramsHeader, strconv.FormatInt(histograms, 10)) + w.Header().Set(rw20WrittenExemplarsHeader, strconv.FormatInt(exemplars, 10)) +} + +// Refer to parseProtoMsg in https://github.com/prometheus/prometheus/blob/main/storage/remote/write_handler.go +func parseProtoMsg(contentType string) (config.RemoteWriteProtoMsg, error) { + contentType = strings.TrimSpace(contentType) + + parts := strings.Split(contentType, ";") + if parts[0] != appProtoContentType { + return "", fmt.Errorf("expected %v as the first (media) part, got %v content-type", appProtoContentType, contentType) + } + // Parse potential https://www.rfc-editor.org/rfc/rfc9110#parameter + for _, p := range parts[1:] { + pair := strings.Split(p, "=") + if len(pair) != 2 { + return "", fmt.Errorf("as per https://www.rfc-editor.org/rfc/rfc9110#parameter expected parameters to be key-values, got %v in %v content-type", p, contentType) + } + if pair[0] == "proto" { + ret := config.RemoteWriteProtoMsg(pair[1]) + if err := ret.Validate(); err != nil { + return "", fmt.Errorf("got %v content type; %w", contentType, 
err) + } + return ret, nil + } + } + // No "proto=" parameter, assuming v1. + return config.RemoteWriteProtoMsgV1, nil +} + +func convertV2RequestToV1(req *writev2.Request) (cortexpb.PreallocWriteRequest, error) { + var v1Req cortexpb.PreallocWriteRequest + v1Timeseries := make([]cortexpb.PreallocTimeseries, 0, len(req.Timeseries)) + var v1Metadata []*cortexpb.MetricMetadata + + b := labels.NewScratchBuilder(0) + symbols := req.Symbols + for _, v2Ts := range req.Timeseries { + lbs := v2Ts.ToLabels(&b, symbols) + v1Timeseries = append(v1Timeseries, cortexpb.PreallocTimeseries{ + TimeSeries: &cortexpb.TimeSeries{ + Labels: cortexpb.FromLabelsToLabelAdapters(lbs), + Samples: convertV2ToV1Samples(v2Ts.Samples), + Exemplars: convertV2ToV1Exemplars(b, symbols, v2Ts.Exemplars), + Histograms: convertV2ToV1Histograms(v2Ts.Histograms), + }, + }) + + if shouldConvertV2Metadata(v2Ts.Metadata) { + metricName, err := extract.MetricNameFromLabels(lbs) + if err != nil { + return v1Req, err + } + v1Metadata = append(v1Metadata, convertV2ToV1Metadata(metricName, symbols, v2Ts.Metadata)) + } + } + + v1Req.Timeseries = v1Timeseries + v1Req.Metadata = v1Metadata + + return v1Req, nil +} + +func shouldConvertV2Metadata(metadata writev2.Metadata) bool { + return !(metadata.HelpRef == 0 && metadata.UnitRef == 0 && metadata.Type == writev2.Metadata_METRIC_TYPE_UNSPECIFIED) +} + +func convertV2ToV1Histograms(histograms []writev2.Histogram) []cortexpb.Histogram { + v1Histograms := make([]cortexpb.Histogram, 0, len(histograms)) + + for _, h := range histograms { + v1Histograms = append(v1Histograms, cortexpb.HistogramWriteV2ProtoToHistogramProto(h)) + } + + return v1Histograms +} + +func convertV2ToV1Samples(samples []writev2.Sample) []cortexpb.Sample { + v1Samples := make([]cortexpb.Sample, 0, len(samples)) + + for _, s := range samples { + v1Samples = append(v1Samples, cortexpb.Sample{ + Value: s.Value, + TimestampMs: s.Timestamp, + }) + } + + return v1Samples +} + +func 
convertV2ToV1Metadata(name string, symbols []string, metadata writev2.Metadata) *cortexpb.MetricMetadata { + t := cortexpb.UNKNOWN + + switch metadata.Type { + case writev2.Metadata_METRIC_TYPE_COUNTER: + t = cortexpb.COUNTER + case writev2.Metadata_METRIC_TYPE_GAUGE: + t = cortexpb.GAUGE + case writev2.Metadata_METRIC_TYPE_HISTOGRAM: + t = cortexpb.HISTOGRAM + case writev2.Metadata_METRIC_TYPE_GAUGEHISTOGRAM: + t = cortexpb.GAUGEHISTOGRAM + case writev2.Metadata_METRIC_TYPE_SUMMARY: + t = cortexpb.SUMMARY + case writev2.Metadata_METRIC_TYPE_INFO: + t = cortexpb.INFO + case writev2.Metadata_METRIC_TYPE_STATESET: + t = cortexpb.STATESET + } + + return &cortexpb.MetricMetadata{ + Type: t, + MetricFamilyName: name, + Unit: symbols[metadata.UnitRef], + Help: symbols[metadata.HelpRef], + } +} + +func convertV2ToV1Exemplars(b labels.ScratchBuilder, symbols []string, v2Exemplars []writev2.Exemplar) []cortexpb.Exemplar { + v1Exemplars := make([]cortexpb.Exemplar, 0, len(v2Exemplars)) + for _, e := range v2Exemplars { + promExemplar := e.ToExemplar(&b, symbols) + v1Exemplars = append(v1Exemplars, cortexpb.Exemplar{ + Labels: cortexpb.FromLabelsToLabelAdapters(promExemplar.Labels), + Value: e.Value, + TimestampMs: e.Timestamp, + }) + } + return v1Exemplars +} diff --git a/pkg/util/push/push_test.go b/pkg/util/push/push_test.go index b806011a611..85fcaf29f68 100644 --- a/pkg/util/push/push_test.go +++ b/pkg/util/push/push_test.go @@ -10,6 +10,8 @@ import ( "github.com/golang/snappy" "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/weaveworks/common/middleware" @@ -17,30 +19,239 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" ) +func Test_convertV2RequestToV1(t *testing.T) { + var v2Req writev2.Request + + fh := 
tsdbutil.GenerateTestFloatHistogram(1) + ph := writev2.FromFloatHistogram(4, fh) + + symbols := []string{"", "__name__", "test_metric", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"} + timeseries := []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, + + HelpRef: 15, + UnitRef: 16, + }, + Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}}, + }, + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Samples: []writev2.Sample{{Value: 2, Timestamp: 2}}, + }, + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Samples: []writev2.Sample{{Value: 3, Timestamp: 3}}, + }, + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Histograms: []writev2.Histogram{ph, ph}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}}, + }, + } + + v2Req.Symbols = symbols + v2Req.Timeseries = timeseries + v1Req, err := convertV2RequestToV1(&v2Req) + assert.NoError(t, err) + expectedSamples := 3 + expectedExemplars := 2 + expectedHistograms := 2 + countSamples := 0 + countExemplars := 0 + countHistograms := 0 + + for _, ts := range v1Req.Timeseries { + countSamples += len(ts.Samples) + countExemplars += len(ts.Exemplars) + countHistograms += len(ts.Histograms) + } + + assert.Equal(t, expectedSamples, countSamples) + assert.Equal(t, expectedExemplars, countExemplars) + assert.Equal(t, expectedHistograms, countHistograms) + assert.Equal(t, 4, len(v1Req.Timeseries)) + assert.Equal(t, 1, len(v1Req.Metadata)) +} + func TestHandler_remoteWrite(t *testing.T) { - req := createRequest(t, createPrometheusRemoteWriteProtobuf(t)) - resp := httptest.NewRecorder() - handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.API)) - 
handler.ServeHTTP(resp, req) - assert.Equal(t, 200, resp.Code) + t.Run("remote write v1", func(t *testing.T) { + handler := Handler(true, 100000, nil, verifyWriteRequestHandler(t, cortexpb.API)) + req := createRequest(t, createPrometheusRemoteWriteProtobuf(t), false) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, http.StatusOK, resp.Code) + }) + t.Run("remote write v2", func(t *testing.T) { + handler := Handler(true, 100000, nil, verifyWriteRequestHandler(t, cortexpb.API)) + req := createRequest(t, createPrometheusRemoteWriteV2Protobuf(t), true) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, http.StatusOK, resp.Code) + + // test header value + respHeader := resp.Header() + assert.Equal(t, "1", respHeader[rw20WrittenSamplesHeader][0]) + assert.Equal(t, "1", respHeader[rw20WrittenHistogramsHeader][0]) + assert.Equal(t, "1", respHeader[rw20WrittenExemplarsHeader][0]) + }) + t.Run("remote write v2 with not support remote write 2.0", func(t *testing.T) { + handler := Handler(false, 100000, nil, verifyWriteRequestHandler(t, cortexpb.API)) + req := createRequest(t, createPrometheusRemoteWriteV2Protobuf(t), true) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, http.StatusUnsupportedMediaType, resp.Code) + }) +} + +func TestHandler_ContentTypeAndEncoding(t *testing.T) { + sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)") + handler := Handler(true, 100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.API)) + + tests := []struct { + description string + reqHeaders map[string]string + expectedCode int + isV2 bool + }{ + { + description: "[RW 2.0] correct content-type", + reqHeaders: map[string]string{ + "Content-Type": appProtoV2ContentType, + "Content-Encoding": "snappy", + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusOK, + isV2: true, + }, + { + description: "[RW 1.0] correct content-type", + reqHeaders: map[string]string{ + 
"Content-Type": appProtoV1ContentType, + "Content-Encoding": "snappy", + remoteWriteVersionHeader: "0.1.0", + }, + expectedCode: http.StatusOK, + isV2: false, + }, + { + description: "[RW 2.0] wrong content-type", + reqHeaders: map[string]string{ + "Content-Type": "yolo", + "Content-Encoding": "snappy", + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusUnsupportedMediaType, + isV2: true, + }, + { + description: "[RW 2.0] wrong content-type", + reqHeaders: map[string]string{ + "Content-Type": "application/x-protobuf;proto=yolo", + "Content-Encoding": "snappy", + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusUnsupportedMediaType, + isV2: true, + }, + { + description: "[RW 2.0] wrong content-encoding", + reqHeaders: map[string]string{ + "Content-Type": "application/x-protobuf;proto=io.prometheus.write.v2.Request", + "Content-Encoding": "zstd", + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusUnsupportedMediaType, + isV2: true, + }, + { + description: "no header, should treated as RW 1.0", + expectedCode: http.StatusOK, + isV2: false, + }, + { + description: "missing content-type, should treated as RW 1.0", + reqHeaders: map[string]string{ + "Content-Encoding": "snappy", + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusOK, + isV2: false, + }, + { + description: "missing content-encoding", + reqHeaders: map[string]string{ + "Content-Type": appProtoV2ContentType, + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusOK, + isV2: true, + }, + { + description: "missing remote write version, should treated based on Content-type", + reqHeaders: map[string]string{ + "Content-Type": appProtoV2ContentType, + "Content-Encoding": "snappy", + }, + expectedCode: http.StatusOK, + isV2: true, + }, + { + description: "missing remote write version, should treated based on Content-type", + reqHeaders: map[string]string{ + "Content-Type": appProtoV1ContentType, + "Content-Encoding": "snappy", + 
}, + expectedCode: http.StatusOK, + isV2: false, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + if test.isV2 { + req := createRequestWithHeaders(t, test.reqHeaders, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.API)) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, test.expectedCode, resp.Code) + } else { + req := createRequestWithHeaders(t, test.reqHeaders, createCortexWriteRequestProtobuf(t, false, cortexpb.API)) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, test.expectedCode, resp.Code) + } + }) + } } func TestHandler_cortexWriteRequest(t *testing.T) { - req := createRequest(t, createCortexWriteRequestProtobuf(t, false)) - resp := httptest.NewRecorder() sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)") - handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.RULE)) - handler.ServeHTTP(resp, req) - assert.Equal(t, 200, resp.Code) + handler := Handler(true, 100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.API)) + + t.Run("remote write v1", func(t *testing.T) { + req := createRequest(t, createCortexWriteRequestProtobuf(t, false, cortexpb.API), false) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, 200, resp.Code) + }) + t.Run("remote write v2", func(t *testing.T) { + req := createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.API), true) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, 200, resp.Code) + }) } func TestHandler_ignoresSkipLabelNameValidationIfSet(t *testing.T) { for _, req := range []*http.Request{ - createRequest(t, createCortexWriteRequestProtobuf(t, true)), - createRequest(t, createCortexWriteRequestProtobuf(t, false)), + createRequest(t, createCortexWriteRequestProtobuf(t, true, cortexpb.RULE), false), + createRequest(t, createCortexWriteRequestProtobuf(t, true, cortexpb.RULE), false), } { resp := 
httptest.NewRecorder() - handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE)) + handler := Handler(true, 100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE)) handler.ServeHTTP(resp, req) assert.Equal(t, 200, resp.Code) } @@ -54,21 +265,86 @@ func verifyWriteRequestHandler(t *testing.T, expectSource cortexpb.WriteRequest_ assert.Equal(t, "foo", request.Timeseries[0].Labels[0].Value) assert.Equal(t, expectSource, request.Source) assert.False(t, request.SkipLabelNameValidation) - return &cortexpb.WriteResponse{}, nil + + resp := &cortexpb.WriteResponse{ + Samples: 1, + Histograms: 1, + Exemplars: 1, + } + + return resp, nil + } +} + +func createRequestWithHeaders(t *testing.T, headers map[string]string, protobuf []byte) *http.Request { + t.Helper() + inoutBytes := snappy.Encode(nil, protobuf) + req, err := http.NewRequest("POST", "http://localhost/", bytes.NewReader(inoutBytes)) + require.NoError(t, err) + + for k, v := range headers { + req.Header.Set(k, v) } + return req } -func createRequest(t *testing.T, protobuf []byte) *http.Request { +func createRequest(t *testing.T, protobuf []byte, isV2 bool) *http.Request { t.Helper() inoutBytes := snappy.Encode(nil, protobuf) req, err := http.NewRequest("POST", "http://localhost/", bytes.NewReader(inoutBytes)) require.NoError(t, err) + req.Header.Add("Content-Encoding", "snappy") - req.Header.Set("Content-Type", "application/x-protobuf") - req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") + + if isV2 { + req.Header.Set("Content-Type", appProtoV2ContentType) + req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion20HeaderValue) + return req + } + + req.Header.Set("Content-Type", appProtoContentType) + req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion1HeaderValue) return req } +func createCortexRemoteWriteV2Protobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.WriteRequest_SourceEnum) []byte { + t.Helper() + input := writev2.Request{ + 
Symbols: []string{"", "__name__", "foo"}, + Timeseries: []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2}, + Samples: []writev2.Sample{ + {Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, + }, + }, + }, + } + + inoutBytes, err := input.Marshal() + require.NoError(t, err) + return inoutBytes +} + +func createPrometheusRemoteWriteV2Protobuf(t *testing.T) []byte { + t.Helper() + input := writev2.Request{ + Symbols: []string{"", "__name__", "foo"}, + Timeseries: []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2}, + Samples: []writev2.Sample{ + {Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, + }, + }, + }, + } + + inoutBytes, err := input.Marshal() + require.NoError(t, err) + return inoutBytes +} + func createPrometheusRemoteWriteProtobuf(t *testing.T) []byte { t.Helper() input := prompb.WriteRequest{ @@ -87,7 +363,7 @@ func createPrometheusRemoteWriteProtobuf(t *testing.T) []byte { require.NoError(t, err) return inoutBytes } -func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool) []byte { +func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.WriteRequest_SourceEnum) []byte { t.Helper() ts := cortexpb.PreallocTimeseries{ TimeSeries: &cortexpb.TimeSeries{ @@ -101,7 +377,7 @@ func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool } input := cortexpb.WriteRequest{ Timeseries: []cortexpb.PreallocTimeseries{ts}, - Source: cortexpb.RULE, + Source: source, SkipLabelNameValidation: skipLabelNameValidation, } inoutBytes, err := input.Marshal() From b84bc8cc0727f99b9c69c2478549fd3966e6903c Mon Sep 17 00:00:00 2001 From: SungJin1212 Date: Mon, 14 Apr 2025 10:53:24 +0900 Subject: [PATCH 2/6] Change to not break exist behavior Signed-off-by: SungJin1212 --- integration/remote_write_v2_test.go | 63 ++++++++++++ pkg/util/push/push.go | 143 ++++++++++++++-------------- pkg/util/push/push_test.go | 7 -- 3 files changed, 
136 insertions(+), 77 deletions(-) diff --git a/integration/remote_write_v2_test.go b/integration/remote_write_v2_test.go index 88ca384fb04..4ebcc142077 100644 --- a/integration/remote_write_v2_test.go +++ b/integration/remote_write_v2_test.go @@ -137,6 +137,69 @@ func TestIngesterRollingUpdate(t *testing.T) { } } +func TestIngest_SenderSendPRW2_DistributorNotAllowPRW2(t *testing.T) { + const blockRangePeriod = 5 * time.Second + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + // Distributor. + "-distributor.replication-factor": "1", + "-distributor.remote-write2-enabled": "false", + // Store-gateway. 
+ "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + // Start Cortex replicas. + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + + // Wait until Cortex replicas have updated the ring state. + require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + symbols1, series, _ := e2e.GenerateSeriesV2("test_series", now, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "foo", Value: "bar"}) + res, err := c.PushV2(symbols1, series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) +} + func TestIngest(t *testing.T) { const blockRangePeriod = 5 * time.Second diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go index 730b3d46193..bbe3e8d489c 100644 --- a/pkg/util/push/push.go +++ b/pkg/util/push/push.go @@ -32,8 +32,6 @@ const ( rw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Samples-Written" rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written" rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Exemplars-Written" - - errMsgNotEnabledPRW2 = "Not enabled prometheus remote write v2 push request" ) // Func defines the type of the push. It is similar to http.HandlerFunc. 
@@ -52,36 +50,7 @@ func Handler(remoteWrite2Enabled bool, maxRecvMsgSize int, sourceIPs *middleware } } - // follow Prometheus https://github.com/prometheus/prometheus/blob/main/storage/remote/write_handler.go - contentType := r.Header.Get("Content-Type") - if contentType == "" { - contentType = appProtoContentType - } - - msgType, err := parseProtoMsg(contentType) - if err != nil { - level.Error(logger).Log("Error decoding remote write request", "err", err) - http.Error(w, err.Error(), http.StatusUnsupportedMediaType) - return - } - - if msgType != config.RemoteWriteProtoMsgV1 && msgType != config.RemoteWriteProtoMsgV2 { - level.Error(logger).Log("Not accepted msg type", "msgType", msgType, "err", err) - http.Error(w, err.Error(), http.StatusUnsupportedMediaType) - return - } - - enc := r.Header.Get("Content-Encoding") - if enc == "" { - } else if enc != string(remote.SnappyBlockCompression) { - err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, remote.SnappyBlockCompression) - level.Error(logger).Log("Error decoding remote write request", "err", err) - http.Error(w, err.Error(), http.StatusUnsupportedMediaType) - return - } - - switch msgType { - case config.RemoteWriteProtoMsgV1: + handlePRW1 := func() { var req cortexpb.PreallocWriteRequest err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) if err != nil { @@ -108,55 +77,89 @@ func Handler(remoteWrite2Enabled bool, maxRecvMsgSize int, sourceIPs *middleware } http.Error(w, string(resp.Body), int(resp.Code)) } - case config.RemoteWriteProtoMsgV2: - if remoteWrite2Enabled { - var req writev2.Request - err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) - if err != nil { - level.Error(logger).Log("err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } + } - v1Req, err := convertV2RequestToV1(&req) - if err != nil { - 
level.Error(logger).Log("err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } + handlePRW2 := func() { + var req writev2.Request + err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } - v1Req.SkipLabelNameValidation = false - // Current source is only API - if v1Req.Source == 0 { - v1Req.Source = cortexpb.API - } + v1Req, err := convertV2RequestToV1(&req) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } - if resp, err := push(ctx, &v1Req.WriteRequest); err != nil { - resp, ok := httpgrpc.HTTPResponseFromError(err) - setHeader(w, 0, 0, 0) - if !ok { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - if resp.GetCode()/100 == 5 { - level.Error(logger).Log("msg", "push error", "err", err) - } else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests { - level.Warn(logger).Log("msg", "push refused", "err", err) - } - http.Error(w, string(resp.Body), int(resp.Code)) - } else { - setHeader(w, resp.Samples, resp.Histograms, resp.Exemplars) + v1Req.SkipLabelNameValidation = false + if v1Req.Source == 0 { + v1Req.Source = cortexpb.API + } + + if resp, err := push(ctx, &v1Req.WriteRequest); err != nil { + resp, ok := httpgrpc.HTTPResponseFromError(err) + setPRW2RespHeader(w, 0, 0, 0) + if !ok { + http.Error(w, err.Error(), http.StatusInternalServerError) + return } + if resp.GetCode()/100 == 5 { + level.Error(logger).Log("msg", "push error", "err", err) + } else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests { + level.Warn(logger).Log("msg", "push refused", "err", err) + } + http.Error(w, string(resp.Body), int(resp.Code)) } else { - level.Error(logger).Log(errMsgNotEnabledPRW2) - 
http.Error(w, errMsgNotEnabledPRW2, http.StatusUnsupportedMediaType) + setPRW2RespHeader(w, resp.Samples, resp.Histograms, resp.Exemplars) + } + } + + if remoteWrite2Enabled { + // follow Prometheus https://github.com/prometheus/prometheus/blob/main/storage/remote/write_handler.go + contentType := r.Header.Get("Content-Type") + if contentType == "" { + contentType = appProtoContentType + } + + msgType, err := parseProtoMsg(contentType) + if err != nil { + level.Error(logger).Log("Error decoding remote write request", "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) return } + + if msgType != config.RemoteWriteProtoMsgV1 && msgType != config.RemoteWriteProtoMsgV2 { + level.Error(logger).Log("Not accepted msg type", "msgType", msgType, "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) + return + } + + enc := r.Header.Get("Content-Encoding") + if enc == "" { + } else if enc != string(remote.SnappyBlockCompression) { + err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, remote.SnappyBlockCompression) + level.Error(logger).Log("Error decoding remote write request", "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) + return + } + + switch msgType { + case config.RemoteWriteProtoMsgV1: + handlePRW1() + case config.RemoteWriteProtoMsgV2: + handlePRW2() + } + } else { + handlePRW1() } }) } -func setHeader(w http.ResponseWriter, samples, histograms, exemplars int64) { +func setPRW2RespHeader(w http.ResponseWriter, samples, histograms, exemplars int64) { w.Header().Set(rw20WrittenSamplesHeader, strconv.FormatInt(samples, 10)) w.Header().Set(rw20WrittenHistogramsHeader, strconv.FormatInt(histograms, 10)) w.Header().Set(rw20WrittenExemplarsHeader, strconv.FormatInt(exemplars, 10)) diff --git a/pkg/util/push/push_test.go b/pkg/util/push/push_test.go index 85fcaf29f68..fd23ac1dff6 100644 --- a/pkg/util/push/push_test.go +++ 
b/pkg/util/push/push_test.go @@ -98,13 +98,6 @@ func TestHandler_remoteWrite(t *testing.T) { assert.Equal(t, "1", respHeader[rw20WrittenHistogramsHeader][0]) assert.Equal(t, "1", respHeader[rw20WrittenExemplarsHeader][0]) }) - t.Run("remote write v2 with not support remote write 2.0", func(t *testing.T) { - handler := Handler(false, 100000, nil, verifyWriteRequestHandler(t, cortexpb.API)) - req := createRequest(t, createPrometheusRemoteWriteV2Protobuf(t), true) - resp := httptest.NewRecorder() - handler.ServeHTTP(resp, req) - assert.Equal(t, http.StatusUnsupportedMediaType, resp.Code) - }) } func TestHandler_ContentTypeAndEncoding(t *testing.T) { From 2fefcf99253a9868f25b9a9d10569a1e95cc20a0 Mon Sep 17 00:00:00 2001 From: SungJin1212 Date: Tue, 15 Apr 2025 13:36:07 +0900 Subject: [PATCH 3/6] Add benchmarks Signed-off-by: SungJin1212 --- pkg/util/push/push_test.go | 144 +++++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) diff --git a/pkg/util/push/push_test.go b/pkg/util/push/push_test.go index fd23ac1dff6..46cb0770f75 100644 --- a/pkg/util/push/push_test.go +++ b/pkg/util/push/push_test.go @@ -3,12 +3,14 @@ package push import ( "bytes" "context" + "fmt" "net/http" "net/http/httptest" "testing" "time" "github.com/golang/snappy" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/prompb" writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/tsdb/tsdbutil" @@ -19,6 +21,148 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" ) +var ( + testHistogram = histogram.Histogram{ + Schema: 2, + ZeroThreshold: 1e-128, + ZeroCount: 0, + Count: 3, + Sum: 20, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{2}, + } +) + +func makeV2ReqWithSeries(num int) *writev2.Request { + ts := make([]writev2.TimeSeries, 0, 
num) + symbols := []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"} + for i := 0; i < num; i++ { + ts = append(ts, writev2.TimeSeries{ + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_GAUGE, + + HelpRef: 15, + UnitRef: 16, + }, + Samples: []writev2.Sample{{Value: 1, Timestamp: 10}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 10}}, + Histograms: []writev2.Histogram{ + writev2.FromIntHistogram(10, &testHistogram), + writev2.FromFloatHistogram(20, testHistogram.ToFloat(nil)), + }, + }) + } + + return &writev2.Request{ + Symbols: symbols, + Timeseries: ts, + } +} + +func createPRW1HTTPRequest(seriesNum int) (*http.Request, error) { + series := makeV2ReqWithSeries(seriesNum) + v1Req, err := convertV2RequestToV1(series) + if err != nil { + return nil, err + } + protobuf, err := v1Req.Marshal() + if err != nil { + return nil, err + } + + body := snappy.Encode(nil, protobuf) + req, err := http.NewRequest("POST", "http://localhost/", newResetReader(body)) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Encoding", "snappy") + req.Header.Set("Content-Type", appProtoContentType) + req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion1HeaderValue) + req.ContentLength = int64(len(body)) + return req, nil +} + +func createPRW2HTTPRequest(seriesNum int) (*http.Request, error) { + series := makeV2ReqWithSeries(seriesNum) + protobuf, err := series.Marshal() + if err != nil { + return nil, err + } + + body := snappy.Encode(nil, protobuf) + req, err := http.NewRequest("POST", "http://localhost/", newResetReader(body)) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Encoding", "snappy") + req.Header.Set("Content-Type", appProtoV2ContentType) + 
req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion20HeaderValue) + req.ContentLength = int64(len(body)) + return req, nil +} + +func Benchmark_Handler(b *testing.B) { + mockHandler := func(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { + // Nothing to do. + return &cortexpb.WriteResponse{}, nil + } + testSeriesNums := []int{10, 100, 500, 1000} + for _, seriesNum := range testSeriesNums { + b.Run(fmt.Sprintf("PRW1 with %d series", seriesNum), func(b *testing.B) { + handler := Handler(true, 1000000, nil, mockHandler) + req, err := createPRW1HTTPRequest(seriesNum) + require.NoError(b, err) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(b, http.StatusOK, resp.Code) + req.Body.(*resetReader).Reset() + } + }) + b.Run(fmt.Sprintf("PRW2 with %d series", seriesNum), func(b *testing.B) { + handler := Handler(true, 1000000, nil, mockHandler) + req, err := createPRW2HTTPRequest(seriesNum) + require.NoError(b, err) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(b, http.StatusOK, resp.Code) + req.Body.(*resetReader).Reset() + } + }) + } +} + +func Benchmark_convertV2RequestToV1(b *testing.B) { + testSeriesNums := []int{100, 500, 1000} + + for _, seriesNum := range testSeriesNums { + b.Run(fmt.Sprintf("%d series", seriesNum), func(b *testing.B) { + series := makeV2ReqWithSeries(seriesNum) + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, err := convertV2RequestToV1(series) + require.NoError(b, err) + } + }) + } +} + func Test_convertV2RequestToV1(t *testing.T) { var v2Req writev2.Request From 5f5a725651e1b68a1c4ddfed0f15618d51397aba Mon Sep 17 00:00:00 2001 From: SungJin1212 Date: Mon, 14 Jul 2025 21:46:06 +0900 Subject: [PATCH 4/6] rebase from stream connection Signed-off-by: SungJin1212 --- 
docs/configuration/config-file-reference.md | 10 +- pkg/cortexpb/cortex.pb.go | 274 ++++++++++++++------ pkg/cortexpb/cortex.proto | 8 +- pkg/distributor/distributor.go | 3 +- pkg/util/push/push.go | 4 +- 5 files changed, 211 insertions(+), 88 deletions(-) diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 9c3a5d26055..b7f2ce6b0dd 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -2889,16 +2889,16 @@ ha_tracker: # CLI flag: -distributor.sign-write-requests [sign_write_requests: | default = false] -# EXPERIMENTAL: If true, accept prometheus remote write v2 protocol push -# request. -# CLI flag: -distributor.remote-write2-enabled -[remote_write2_enabled: | default = false] - # EXPERIMENTAL: If enabled, distributor would use stream connection to send # requests to ingesters. # CLI flag: -distributor.use-stream-push [use_stream_push: | default = false] +# EXPERIMENTAL: If true, accept prometheus remote write v2 protocol push +# request. +# CLI flag: -distributor.remote-write2-enabled +[remote_write2_enabled: | default = false] + ring: kvstore: # Backend storage to use for the ring. 
Supported values are: consul, etcd, diff --git a/pkg/cortexpb/cortex.pb.go b/pkg/cortexpb/cortex.pb.go index 04eab395bc8..e0dac736baf 100644 --- a/pkg/cortexpb/cortex.pb.go +++ b/pkg/cortexpb/cortex.pb.go @@ -263,6 +263,12 @@ func (m *StreamWriteRequest) GetRequest() *WriteRequest { type WriteResponse struct { Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Samples represents X-Prometheus-Remote-Write-Written-Samples + Samples int64 `protobuf:"varint,3,opt,name=Samples,proto3" json:"Samples,omitempty"` + // Histograms represents X-Prometheus-Remote-Write-Written-Histograms + Histograms int64 `protobuf:"varint,4,opt,name=Histograms,proto3" json:"Histograms,omitempty"` + // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars + Exemplars int64 `protobuf:"varint,5,opt,name=Exemplars,proto3" json:"Exemplars,omitempty"` } func (m *WriteResponse) Reset() { *m = WriteResponse{} } @@ -311,6 +317,27 @@ func (m *WriteResponse) GetMessage() string { return "" } +func (m *WriteResponse) GetSamples() int64 { + if m != nil { + return m.Samples + } + return 0 +} + +func (m *WriteResponse) GetHistograms() int64 { + if m != nil { + return m.Histograms + } + return 0 +} + +func (m *WriteResponse) GetExemplars() int64 { + if m != nil { + return m.Exemplars + } + return 0 +} + type TimeSeries struct { Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` // Sorted by time, oldest sample first. 
@@ -945,80 +972,81 @@ func init() { func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 1153 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcd, 0x6f, 0x1b, 0xc5, - 0x1b, 0xde, 0xc9, 0xfa, 0xf3, 0xb5, 0xe3, 0x6e, 0xe7, 0x17, 0xf5, 0xb7, 0x04, 0x75, 0x9d, 0x2e, - 0x02, 0x2c, 0x84, 0x02, 0x0a, 0x02, 0xd4, 0xaa, 0x20, 0xd9, 0xad, 0xdb, 0x44, 0xad, 0x9d, 0x68, - 0xec, 0x50, 0x95, 0x8b, 0x35, 0xb5, 0xc7, 0xf6, 0xaa, 0xfb, 0xc5, 0xce, 0xb8, 0x6a, 0x38, 0x71, - 0x01, 0x71, 0xe4, 0xcc, 0x0d, 0x71, 0xe1, 0xca, 0x7f, 0xd1, 0x63, 0x8e, 0x55, 0x0f, 0x11, 0x75, - 0x2f, 0xe5, 0xd6, 0x03, 0x7f, 0x00, 0x9a, 0xd9, 0x2f, 0xa7, 0x69, 0xc5, 0x25, 0xb7, 0x79, 0x9f, - 0xf7, 0x79, 0xdf, 0x79, 0xe6, 0xfd, 0x58, 0x1b, 0xea, 0xe3, 0x20, 0x12, 0xec, 0xf1, 0x76, 0x18, - 0x05, 0x22, 0xc0, 0x95, 0xd8, 0x0a, 0x1f, 0x6c, 0x6e, 0xcc, 0x82, 0x59, 0xa0, 0xc0, 0x4f, 0xe4, - 0x29, 0xf6, 0xdb, 0xef, 0xc0, 0xc5, 0x1e, 0xe3, 0x9c, 0xce, 0xd8, 0x3d, 0x47, 0xcc, 0x3b, 0x8b, - 0x29, 0x61, 0xd3, 0x6b, 0x85, 0x57, 0xbf, 0x35, 0x35, 0xfb, 0x47, 0x1d, 0xea, 0xf7, 0x22, 0x47, - 0x30, 0xc2, 0xbe, 0x5b, 0x30, 0x2e, 0xf0, 0x01, 0x80, 0x70, 0x3c, 0xc6, 0x59, 0xe4, 0x30, 0x6e, - 0xa2, 0x2d, 0xbd, 0x55, 0xdb, 0xd9, 0xd8, 0x4e, 0x2f, 0xd8, 0x1e, 0x3a, 0x1e, 0x1b, 0x28, 0x5f, - 0x67, 0xf3, 0xc9, 0x49, 0x53, 0x7b, 0x76, 0xd2, 0xc4, 0x07, 0x11, 0xa3, 0xae, 0x1b, 0x8c, 0x87, - 0x59, 0x1c, 0x59, 0xc9, 0x81, 0xaf, 0x42, 0x69, 0x10, 0x2c, 0xa2, 0x31, 0x33, 0xd7, 0xb6, 0x50, - 0xab, 0xb1, 0x73, 0x25, 0xcf, 0xb6, 0x7a, 0xf3, 0x76, 0x4c, 0xea, 0xfa, 0x0b, 0x8f, 0x24, 0x01, - 0xf8, 0x1a, 0x54, 0x3c, 0x26, 0xe8, 0x84, 0x0a, 0x6a, 0xea, 0x4a, 0x8a, 0x99, 0x07, 0xf7, 0x98, - 0x88, 0x9c, 0x71, 0x2f, 0xf1, 0x77, 0x0a, 0x4f, 0x4e, 0x9a, 0x88, 0x64, 0x7c, 0x7c, 0x1d, 0x36, - 0xf9, 0x43, 0x27, 0x1c, 0xb9, 0xf4, 0x01, 0x73, 0x47, 0x3e, 0xf5, 0xd8, 0xe8, 0x11, 0x75, 0x9d, - 0x09, 0x15, 0x4e, 
0xe0, 0x9b, 0x2f, 0xcb, 0x5b, 0xa8, 0x55, 0x21, 0xff, 0x97, 0x94, 0xbb, 0x92, - 0xd1, 0xa7, 0x1e, 0xfb, 0x26, 0xf3, 0xe3, 0x1e, 0xe8, 0x84, 0x4d, 0xcd, 0xbf, 0x25, 0xad, 0xb6, - 0xf3, 0xee, 0xea, 0xad, 0xaf, 0x15, 0xb2, 0x73, 0x59, 0xd6, 0xe1, 0xf8, 0xa4, 0x89, 0x9e, 0x9d, - 0x34, 0xcf, 0xd6, 0x99, 0xc8, 0x3c, 0x76, 0x13, 0x20, 0x7f, 0x1e, 0x2e, 0x83, 0xde, 0x3e, 0xd8, - 0x33, 0x34, 0x5c, 0x81, 0x02, 0x39, 0xbc, 0xdb, 0x35, 0x90, 0xfd, 0x27, 0x02, 0x3c, 0x10, 0x11, - 0xa3, 0xde, 0xa9, 0x6e, 0x6c, 0x42, 0x65, 0xc8, 0x7c, 0xea, 0x8b, 0xbd, 0x9b, 0x26, 0xda, 0x42, - 0xad, 0x2a, 0xc9, 0x6c, 0xfc, 0x29, 0x94, 0x13, 0x9a, 0x2a, 0x6c, 0x6d, 0xe7, 0xd2, 0x9b, 0x0b, - 0x4b, 0x52, 0x5a, 0xfa, 0xa8, 0x97, 0xe7, 0xf4, 0xa8, 0xaf, 0x60, 0x3d, 0xb9, 0x87, 0x87, 0x81, - 0xcf, 0x19, 0xc6, 0x50, 0x18, 0x07, 0x13, 0xa6, 0x94, 0x16, 0x89, 0x3a, 0x63, 0x13, 0xca, 0x5e, - 0x1c, 0xae, 0x54, 0x56, 0x49, 0x6a, 0xda, 0xff, 0x20, 0x80, 0x7c, 0x9c, 0x70, 0x1b, 0x4a, 0xaa, - 0x55, 0xe9, 0xd0, 0xfd, 0x2f, 0x97, 0xa7, 0x1a, 0x74, 0x40, 0x9d, 0xa8, 0xb3, 0x91, 0xcc, 0x5c, - 0x5d, 0x41, 0xed, 0x09, 0x0d, 0x05, 0x8b, 0x48, 0x12, 0x28, 0x2b, 0xc2, 0xa9, 0x17, 0xba, 0x8c, - 0x9b, 0x6b, 0x2a, 0x87, 0x91, 0xe7, 0x18, 0x28, 0x87, 0x9a, 0x12, 0x8d, 0xa4, 0x34, 0xfc, 0x05, - 0x54, 0xd9, 0x63, 0xe6, 0x85, 0x2e, 0x8d, 0x78, 0x32, 0x61, 0x38, 0x8f, 0xe9, 0x26, 0xae, 0x24, - 0x2a, 0xa7, 0xe2, 0xab, 0x00, 0x73, 0x87, 0x8b, 0x60, 0x16, 0x51, 0x8f, 0x9b, 0x85, 0xd7, 0x05, - 0xef, 0xa6, 0xbe, 0x24, 0x72, 0x85, 0x6c, 0x7f, 0x0e, 0xd5, 0xec, 0x3d, 0xb2, 0x62, 0x72, 0x32, - 0x55, 0xc5, 0xea, 0x44, 0x9d, 0xf1, 0x06, 0x14, 0x1f, 0x51, 0x77, 0x11, 0xd7, 0xab, 0x4e, 0x62, - 0xc3, 0x6e, 0x43, 0x29, 0x7e, 0x42, 0xee, 0x97, 0x41, 0x28, 0xf1, 0xe3, 0x2b, 0x50, 0x57, 0x3b, - 0x27, 0xa8, 0x17, 0x8e, 0x3c, 0xae, 0x82, 0x75, 0x52, 0xcb, 0xb0, 0x1e, 0xb7, 0x7f, 0x5d, 0x83, - 0xc6, 0xe9, 0xa5, 0xc1, 0x5f, 0x42, 0x41, 0x1c, 0x85, 0x71, 0xaa, 0xc6, 0xce, 0x7b, 0x6f, 0x5b, - 0xae, 0xc4, 0x1c, 0x1e, 0x85, 0x8c, 0xa8, 0x00, 0xfc, 
0x31, 0x60, 0x4f, 0x61, 0xa3, 0x29, 0xf5, - 0x1c, 0xf7, 0x48, 0x2d, 0x58, 0xd2, 0x61, 0x23, 0xf6, 0xdc, 0x52, 0x0e, 0xb9, 0x57, 0xf2, 0x99, - 0x73, 0xe6, 0x86, 0x66, 0x41, 0xf9, 0xd5, 0x59, 0x62, 0x0b, 0xdf, 0x11, 0x66, 0x31, 0xc6, 0xe4, - 0xd9, 0x3e, 0x02, 0xc8, 0x6f, 0xc2, 0x35, 0x28, 0x1f, 0xf6, 0xef, 0xf4, 0xf7, 0xef, 0xf5, 0x0d, - 0x4d, 0x1a, 0x37, 0xf6, 0x0f, 0xfb, 0xc3, 0x2e, 0x31, 0x10, 0xae, 0x42, 0xf1, 0x76, 0xfb, 0xf0, - 0x76, 0xd7, 0x58, 0xc3, 0xeb, 0x50, 0xdd, 0xdd, 0x1b, 0x0c, 0xf7, 0x6f, 0x93, 0x76, 0xcf, 0xd0, - 0x31, 0x86, 0x86, 0xf2, 0xe4, 0x58, 0x41, 0x86, 0x0e, 0x0e, 0x7b, 0xbd, 0x36, 0xb9, 0x6f, 0x14, - 0xe5, 0xca, 0xed, 0xf5, 0x6f, 0xed, 0x1b, 0x25, 0x5c, 0x87, 0xca, 0x60, 0xd8, 0x1e, 0x76, 0x07, - 0xdd, 0xa1, 0x51, 0xb6, 0xef, 0x40, 0x29, 0xbe, 0xfa, 0x1c, 0x06, 0xd1, 0xfe, 0x09, 0x41, 0x25, - 0x1d, 0x9e, 0xf3, 0x18, 0xec, 0x53, 0x23, 0xf1, 0xd6, 0x96, 0xeb, 0x67, 0x5b, 0x7e, 0x5c, 0x84, - 0x6a, 0x36, 0x8c, 0xf8, 0x32, 0x54, 0xc7, 0xc1, 0xc2, 0x17, 0x23, 0xc7, 0x17, 0xaa, 0xe5, 0x85, - 0x5d, 0x8d, 0x54, 0x14, 0xb4, 0xe7, 0x0b, 0x7c, 0x05, 0x6a, 0xb1, 0x7b, 0xea, 0x06, 0x34, 0xfe, - 0xa8, 0xa0, 0x5d, 0x8d, 0x80, 0x02, 0x6f, 0x49, 0x0c, 0x1b, 0xa0, 0xf3, 0x85, 0xa7, 0x6e, 0x42, - 0x44, 0x1e, 0xf1, 0x25, 0x28, 0xf1, 0xf1, 0x9c, 0x79, 0x54, 0x35, 0xf7, 0x22, 0x49, 0x2c, 0xfc, - 0x3e, 0x34, 0xbe, 0x67, 0x51, 0x30, 0x12, 0xf3, 0x88, 0xf1, 0x79, 0xe0, 0x4e, 0x54, 0xa3, 0x11, - 0x59, 0x97, 0xe8, 0x30, 0x05, 0xf1, 0x07, 0x09, 0x2d, 0xd7, 0x55, 0x52, 0xba, 0x10, 0xa9, 0x4b, - 0xfc, 0x46, 0xaa, 0xed, 0x23, 0x30, 0x56, 0x78, 0xb1, 0xc0, 0xb2, 0x12, 0x88, 0x48, 0x23, 0x63, - 0xc6, 0x22, 0xdb, 0xd0, 0xf0, 0xd9, 0x8c, 0x0a, 0xe7, 0x11, 0x1b, 0xf1, 0x90, 0xfa, 0xdc, 0xac, - 0xbc, 0xfe, 0x33, 0xd6, 0x59, 0x8c, 0x1f, 0x32, 0x31, 0x08, 0xa9, 0x9f, 0x6c, 0xe8, 0x7a, 0x1a, - 0x21, 0x31, 0x8e, 0x3f, 0x84, 0x0b, 0x59, 0x8a, 0x09, 0x73, 0x05, 0xe5, 0x66, 0x75, 0x4b, 0x6f, - 0x61, 0x92, 0x65, 0xbe, 0xa9, 0xd0, 0x53, 0x44, 0xa5, 0x8d, 0x9b, 0xb0, 0xa5, 0xb7, 0x50, 
0x4e, - 0x54, 0xc2, 0xe4, 0xe7, 0xad, 0x11, 0x06, 0xdc, 0x59, 0x11, 0x55, 0xfb, 0x6f, 0x51, 0x69, 0x44, - 0x26, 0x2a, 0x4b, 0x91, 0x88, 0xaa, 0xc7, 0xa2, 0x52, 0x38, 0x17, 0x95, 0x11, 0x13, 0x51, 0xeb, - 0xb1, 0xa8, 0x14, 0x4e, 0x44, 0x5d, 0x07, 0x88, 0x18, 0x67, 0x62, 0x34, 0x97, 0x95, 0x6f, 0xa8, - 0x8f, 0xc0, 0xe5, 0x37, 0x7c, 0xc6, 0xb6, 0x89, 0x64, 0xed, 0x3a, 0xbe, 0x20, 0xd5, 0x28, 0x3d, - 0x9e, 0x99, 0xbf, 0x0b, 0x67, 0xe7, 0xef, 0x1a, 0x54, 0xb3, 0xd0, 0xd3, 0xfb, 0x5c, 0x06, 0xfd, - 0x7e, 0x77, 0x60, 0x20, 0x5c, 0x82, 0xb5, 0xfe, 0xbe, 0xb1, 0x96, 0xef, 0xb4, 0xbe, 0x59, 0xf8, - 0xf9, 0x77, 0x0b, 0x75, 0xca, 0x50, 0x54, 0xe2, 0x3b, 0x75, 0x80, 0xbc, 0xf7, 0xf6, 0x75, 0x80, - 0xbc, 0x50, 0x72, 0xfc, 0x82, 0xe9, 0x94, 0xb3, 0x78, 0x9e, 0x2f, 0x92, 0xc4, 0x92, 0xb8, 0xcb, - 0xfc, 0x99, 0x98, 0xab, 0x31, 0x5e, 0x27, 0x89, 0xd5, 0xf9, 0xfa, 0xf8, 0xb9, 0xa5, 0x3d, 0x7d, - 0x6e, 0x69, 0xaf, 0x9e, 0x5b, 0xe8, 0x87, 0xa5, 0x85, 0xfe, 0x58, 0x5a, 0xe8, 0xc9, 0xd2, 0x42, - 0xc7, 0x4b, 0x0b, 0xfd, 0xb5, 0xb4, 0xd0, 0xcb, 0xa5, 0xa5, 0xbd, 0x5a, 0x5a, 0xe8, 0x97, 0x17, - 0x96, 0x76, 0xfc, 0xc2, 0xd2, 0x9e, 0xbe, 0xb0, 0xb4, 0x6f, 0xb3, 0x3f, 0x58, 0x0f, 0x4a, 0xea, - 0x1f, 0xd5, 0x67, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x82, 0x66, 0x44, 0xf2, 0x81, 0x09, 0x00, - 0x00, + // 1183 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xbd, 0x8f, 0x1b, 0x45, + 0x14, 0xdf, 0xb9, 0xf5, 0xd7, 0x3e, 0xfb, 0x9c, 0xcd, 0x70, 0x0a, 0xcb, 0x41, 0xd6, 0x8e, 0x11, + 0x60, 0x21, 0x74, 0xa0, 0x43, 0x80, 0x12, 0x45, 0x48, 0x76, 0xe2, 0xe4, 0x4e, 0x89, 0x7d, 0xa7, + 0xb1, 0x8f, 0x28, 0x34, 0xd6, 0xc4, 0x37, 0xb6, 0x57, 0xd9, 0x2f, 0x76, 0xc6, 0x51, 0x8e, 0x8a, + 0x06, 0x44, 0x49, 0x43, 0x43, 0x87, 0x68, 0x68, 0xf9, 0x2f, 0x52, 0x5e, 0x19, 0xa5, 0x38, 0x11, + 0xa7, 0x09, 0x5d, 0x0a, 0xfe, 0x00, 0x34, 0xb3, 0x5f, 0xbe, 0x5c, 0x22, 0x9a, 0x74, 0xf3, 0x7e, + 0xef, 0x63, 0x7e, 0xf3, 0xde, 0xef, 0xad, 0x0d, 0xb5, 0x49, 0x10, 0x09, 
0xf6, 0x70, 0x2b, 0x8c, + 0x02, 0x11, 0xe0, 0x4a, 0x6c, 0x85, 0xf7, 0x36, 0x37, 0x66, 0xc1, 0x2c, 0x50, 0xe0, 0xa7, 0xf2, + 0x14, 0xfb, 0x5b, 0xef, 0xc0, 0xf9, 0x3e, 0xe3, 0x9c, 0xce, 0xd8, 0x1d, 0x47, 0xcc, 0xbb, 0x8b, + 0x29, 0x61, 0xd3, 0x2b, 0x85, 0x17, 0xbf, 0x37, 0xb4, 0xd6, 0x8f, 0x3a, 0xd4, 0xee, 0x44, 0x8e, + 0x60, 0x84, 0x7d, 0xb7, 0x60, 0x5c, 0xe0, 0x7d, 0x00, 0xe1, 0x78, 0x8c, 0xb3, 0xc8, 0x61, 0xdc, + 0x42, 0x4d, 0xbd, 0x5d, 0xdd, 0xde, 0xd8, 0x4a, 0x2f, 0xd8, 0x1a, 0x39, 0x1e, 0x1b, 0x2a, 0x5f, + 0x77, 0xf3, 0xd1, 0x49, 0x43, 0x7b, 0x72, 0xd2, 0xc0, 0xfb, 0x11, 0xa3, 0xae, 0x1b, 0x4c, 0x46, + 0x59, 0x1e, 0x59, 0xa9, 0x81, 0x2f, 0x43, 0x69, 0x18, 0x2c, 0xa2, 0x09, 0xb3, 0xd6, 0x9a, 0xa8, + 0x5d, 0xdf, 0xbe, 0x94, 0x57, 0x5b, 0xbd, 0x79, 0x2b, 0x0e, 0xea, 0xf9, 0x0b, 0x8f, 0x24, 0x09, + 0xf8, 0x0a, 0x54, 0x3c, 0x26, 0xe8, 0x21, 0x15, 0xd4, 0xd2, 0x15, 0x15, 0x2b, 0x4f, 0xee, 0x33, + 0x11, 0x39, 0x93, 0x7e, 0xe2, 0xef, 0x16, 0x1e, 0x9d, 0x34, 0x10, 0xc9, 0xe2, 0xf1, 0x55, 0xd8, + 0xe4, 0xf7, 0x9d, 0x70, 0xec, 0xd2, 0x7b, 0xcc, 0x1d, 0xfb, 0xd4, 0x63, 0xe3, 0x07, 0xd4, 0x75, + 0x0e, 0xa9, 0x70, 0x02, 0xdf, 0x7a, 0x5e, 0x6e, 0xa2, 0x76, 0x85, 0xbc, 0x2d, 0x43, 0x6e, 0xcb, + 0x88, 0x01, 0xf5, 0xd8, 0x37, 0x99, 0x1f, 0xf7, 0x41, 0x27, 0x6c, 0x6a, 0xfd, 0x23, 0xc3, 0xaa, + 0xdb, 0xef, 0xae, 0xde, 0xfa, 0x52, 0x23, 0xbb, 0x17, 0x65, 0x1f, 0x8e, 0x4f, 0x1a, 0xe8, 0xc9, + 0x49, 0xe3, 0x6c, 0x9f, 0x89, 0xac, 0xd3, 0x6a, 0x00, 0xe4, 0xcf, 0xc3, 0x65, 0xd0, 0x3b, 0xfb, + 0xbb, 0xa6, 0x86, 0x2b, 0x50, 0x20, 0x07, 0xb7, 0x7b, 0x26, 0x6a, 0xfd, 0x85, 0x00, 0x0f, 0x45, + 0xc4, 0xa8, 0x77, 0x6a, 0x1a, 0x9b, 0x50, 0x19, 0x31, 0x9f, 0xfa, 0x62, 0xf7, 0xba, 0x85, 0x9a, + 0xa8, 0x6d, 0x90, 0xcc, 0xc6, 0x9f, 0x41, 0x39, 0x09, 0x53, 0x8d, 0xad, 0x6e, 0x5f, 0x78, 0x75, + 0x63, 0x49, 0x1a, 0x96, 0x3e, 0xea, 0xf9, 0x1b, 0x7a, 0xd4, 0xaf, 0x08, 0xd6, 0x93, 0x8b, 0x78, + 0x18, 0xf8, 0x9c, 0x61, 0x0c, 0x85, 0x49, 0x70, 0xc8, 0x14, 0xd5, 0x22, 0x51, 0x67, 0x6c, 0x41, + 0xd9, 0x8b, 
0xf3, 0x15, 0x4d, 0x83, 0xa4, 0xa6, 0xf4, 0x0c, 0xa9, 0x17, 0xba, 0x8c, 0x5b, 0x7a, + 0x13, 0xb5, 0x75, 0x92, 0x9a, 0xd8, 0x06, 0xd8, 0x71, 0xb8, 0x08, 0x66, 0x11, 0xf5, 0xb8, 0x55, + 0x50, 0xce, 0x15, 0x04, 0xbf, 0x07, 0x46, 0xef, 0x21, 0xf3, 0x42, 0x97, 0x46, 0xdc, 0x2a, 0x2a, + 0x77, 0x0e, 0xb4, 0xfe, 0x45, 0x00, 0xb9, 0x4e, 0x71, 0x07, 0x4a, 0x4a, 0x03, 0xa9, 0x9a, 0xdf, + 0xca, 0xdf, 0xad, 0x26, 0xbf, 0x4f, 0x9d, 0xa8, 0xbb, 0x91, 0x88, 0xb9, 0xa6, 0xa0, 0xce, 0x21, + 0x0d, 0x05, 0x8b, 0x48, 0x92, 0x28, 0x5b, 0xcd, 0x13, 0xa6, 0x6b, 0xaa, 0x86, 0x99, 0xd7, 0x88, + 0x39, 0x2b, 0xf9, 0x69, 0x24, 0x0d, 0xc3, 0x5f, 0x82, 0xc1, 0x32, 0x86, 0xb1, 0x74, 0x71, 0x9e, + 0x93, 0x72, 0x4d, 0xb2, 0xf2, 0x50, 0x7c, 0x19, 0x60, 0xbe, 0xfa, 0xf2, 0x97, 0x08, 0x67, 0x3d, + 0x48, 0x32, 0x57, 0x82, 0x5b, 0x5f, 0x80, 0x91, 0xbd, 0x47, 0x4e, 0x42, 0x4a, 0x5e, 0x4d, 0xa2, + 0x46, 0xd4, 0x19, 0x6f, 0x40, 0xf1, 0x01, 0x75, 0x17, 0xf1, 0x1c, 0x6a, 0x24, 0x36, 0x5a, 0x1d, + 0x28, 0xc5, 0x4f, 0xc8, 0xfd, 0x32, 0x09, 0x25, 0x7e, 0x7c, 0x09, 0x6a, 0x6a, 0x99, 0x05, 0xf5, + 0xc2, 0xb1, 0xc7, 0x55, 0xb2, 0x4e, 0xaa, 0x19, 0xd6, 0xe7, 0xad, 0xdf, 0xd6, 0xa0, 0x7e, 0x7a, + 0x1b, 0xf1, 0x57, 0x50, 0x10, 0x47, 0x61, 0x5c, 0xaa, 0xbe, 0xfd, 0xfe, 0xeb, 0xb6, 0x36, 0x31, + 0x47, 0x47, 0x21, 0x23, 0x2a, 0x01, 0x7f, 0x02, 0xd8, 0x53, 0xd8, 0x78, 0x4a, 0x3d, 0xc7, 0x3d, + 0x52, 0x9b, 0x9b, 0x28, 0xc7, 0x8c, 0x3d, 0x37, 0x94, 0x43, 0x2e, 0xac, 0x7c, 0xe6, 0x9c, 0xb9, + 0xa1, 0x92, 0x88, 0x41, 0xd4, 0x59, 0x62, 0x0b, 0xdf, 0x11, 0x4a, 0x17, 0x06, 0x51, 0xe7, 0xd6, + 0x11, 0x40, 0x7e, 0x13, 0xae, 0x42, 0xf9, 0x60, 0x70, 0x6b, 0xb0, 0x77, 0x67, 0x60, 0x6a, 0xd2, + 0xb8, 0xb6, 0x77, 0x30, 0x18, 0xf5, 0x88, 0x89, 0xb0, 0x01, 0xc5, 0x9b, 0x9d, 0x83, 0x9b, 0x3d, + 0x73, 0x0d, 0xaf, 0x83, 0xb1, 0xb3, 0x3b, 0x1c, 0xed, 0xdd, 0x24, 0x9d, 0xbe, 0xa9, 0x63, 0x0c, + 0x75, 0xe5, 0xc9, 0xb1, 0x82, 0x4c, 0x1d, 0x1e, 0xf4, 0xfb, 0x1d, 0x72, 0xd7, 0x2c, 0xca, 0x5d, + 0xde, 0x1d, 0xdc, 0xd8, 0x33, 0x4b, 0xb8, 0x06, 
0x95, 0xe1, 0xa8, 0x33, 0xea, 0x0d, 0x7b, 0x23, + 0xb3, 0xdc, 0xba, 0x05, 0xa5, 0xf8, 0xea, 0x37, 0x20, 0xc4, 0xd6, 0x4f, 0x08, 0x2a, 0xa9, 0x78, + 0xde, 0x84, 0xb0, 0x4f, 0x49, 0xe2, 0xb5, 0x23, 0xd7, 0xcf, 0x8e, 0xfc, 0xb8, 0x08, 0x46, 0x26, + 0x46, 0x7c, 0x11, 0x8c, 0x49, 0xb0, 0xf0, 0xc5, 0xd8, 0xf1, 0x85, 0x1a, 0x79, 0x61, 0x47, 0x23, + 0x15, 0x05, 0xed, 0xfa, 0x02, 0x5f, 0x82, 0x6a, 0xec, 0x9e, 0xba, 0x01, 0x8d, 0xbf, 0x56, 0x68, + 0x47, 0x23, 0xa0, 0xc0, 0x1b, 0x12, 0xc3, 0x26, 0xe8, 0x7c, 0xe1, 0xa9, 0x9b, 0x10, 0x91, 0x47, + 0x7c, 0x01, 0x4a, 0x7c, 0x32, 0x67, 0x1e, 0x55, 0xc3, 0x3d, 0x4f, 0x12, 0x0b, 0x7f, 0x00, 0xf5, + 0xef, 0x59, 0x14, 0x8c, 0xc5, 0x3c, 0x62, 0x7c, 0x1e, 0xb8, 0x87, 0x6a, 0xd0, 0x88, 0xac, 0x4b, + 0x74, 0x94, 0x82, 0xf8, 0xc3, 0x24, 0x2c, 0xe7, 0x55, 0x52, 0xbc, 0x10, 0xa9, 0x49, 0xfc, 0x5a, + 0xca, 0xed, 0x63, 0x30, 0x57, 0xe2, 0x62, 0x82, 0x65, 0x45, 0x10, 0x91, 0x7a, 0x16, 0x19, 0x93, + 0xec, 0x40, 0xdd, 0x67, 0x33, 0x2a, 0x9c, 0x07, 0x6c, 0xcc, 0x43, 0xea, 0x73, 0xab, 0xf2, 0xf2, + 0xef, 0x63, 0x77, 0x31, 0xb9, 0xcf, 0xc4, 0x30, 0xa4, 0x7e, 0xb2, 0xa1, 0xeb, 0x69, 0x86, 0xc4, + 0x38, 0xfe, 0x08, 0xce, 0x65, 0x25, 0x0e, 0x99, 0x2b, 0x28, 0xb7, 0x8c, 0xa6, 0xde, 0xc6, 0x24, + 0xab, 0x7c, 0x5d, 0xa1, 0xa7, 0x02, 0x15, 0x37, 0x6e, 0x41, 0x53, 0x6f, 0xa3, 0x3c, 0x50, 0x11, + 0x93, 0x9f, 0xb7, 0x7a, 0x18, 0x70, 0x67, 0x85, 0x54, 0xf5, 0xff, 0x49, 0xa5, 0x19, 0x19, 0xa9, + 0xac, 0x44, 0x42, 0xaa, 0x16, 0x93, 0x4a, 0xe1, 0x9c, 0x54, 0x16, 0x98, 0x90, 0x5a, 0x8f, 0x49, + 0xa5, 0x70, 0x42, 0xea, 0x2a, 0x40, 0xc4, 0x38, 0x13, 0xe3, 0xb9, 0xec, 0x7c, 0x5d, 0x7d, 0x04, + 0x2e, 0xbe, 0xe2, 0x33, 0xb6, 0x45, 0x64, 0xd4, 0x8e, 0xe3, 0x0b, 0x62, 0x44, 0xe9, 0xf1, 0x8c, + 0xfe, 0xce, 0x9d, 0xd5, 0xdf, 0x15, 0x30, 0xb2, 0xd4, 0xd3, 0xfb, 0x5c, 0x06, 0xfd, 0x6e, 0x6f, + 0x68, 0x22, 0x5c, 0x82, 0xb5, 0xc1, 0x9e, 0xb9, 0x96, 0xef, 0xb4, 0xbe, 0x59, 0xf8, 0xf9, 0x0f, + 0x1b, 0x75, 0xcb, 0x50, 0x54, 0xe4, 0xbb, 0x35, 0x80, 0x7c, 0xf6, 0xad, 0xab, 0x00, 
0x79, 0xa3, + 0xa4, 0xfc, 0x82, 0xe9, 0x94, 0xb3, 0x58, 0xcf, 0xe7, 0x49, 0x62, 0x49, 0xdc, 0x65, 0xfe, 0x4c, + 0xcc, 0x95, 0x8c, 0xd7, 0x49, 0x62, 0x75, 0xbf, 0x3e, 0x7e, 0x6a, 0x6b, 0x8f, 0x9f, 0xda, 0xda, + 0x8b, 0xa7, 0x36, 0xfa, 0x61, 0x69, 0xa3, 0x3f, 0x97, 0x36, 0x7a, 0xb4, 0xb4, 0xd1, 0xf1, 0xd2, + 0x46, 0x7f, 0x2f, 0x6d, 0xf4, 0x7c, 0x69, 0x6b, 0x2f, 0x96, 0x36, 0xfa, 0xe5, 0x99, 0xad, 0x1d, + 0x3f, 0xb3, 0xb5, 0xc7, 0xcf, 0x6c, 0xed, 0xdb, 0xec, 0x9f, 0xdb, 0xbd, 0x92, 0xfa, 0xab, 0xf6, + 0xf9, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x10, 0xfc, 0x83, 0xda, 0x09, 0x00, 0x00, } func (x WriteRequest_SourceEnum) String() string { @@ -1164,6 +1192,15 @@ func (this *WriteResponse) Equal(that interface{}) bool { if this.Message != that1.Message { return false } + if this.Samples != that1.Samples { + return false + } + if this.Histograms != that1.Histograms { + return false + } + if this.Exemplars != that1.Exemplars { + return false + } return true } func (this *TimeSeries) Equal(that interface{}) bool { @@ -1638,10 +1675,13 @@ func (this *WriteResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 9) s = append(s, "&cortexpb.WriteResponse{") s = append(s, "Code: "+fmt.Sprintf("%#v", this.Code)+",\n") s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n") + s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") + s = append(s, "Histograms: "+fmt.Sprintf("%#v", this.Histograms)+",\n") + s = append(s, "Exemplars: "+fmt.Sprintf("%#v", this.Exemplars)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1999,6 +2039,21 @@ func (m *WriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Exemplars != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Exemplars)) + i-- + dAtA[i] = 0x28 + } + if m.Histograms != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Histograms)) + i-- + dAtA[i] = 0x20 + } + if m.Samples != 0 { + i = encodeVarintCortex(dAtA, 
i, uint64(m.Samples)) + i-- + dAtA[i] = 0x18 + } if len(m.Message) > 0 { i -= len(m.Message) copy(dAtA[i:], m.Message) @@ -2612,6 +2667,15 @@ func (m *WriteResponse) Size() (n int) { if l > 0 { n += 1 + l + sovCortex(uint64(l)) } + if m.Samples != 0 { + n += 1 + sovCortex(uint64(m.Samples)) + } + if m.Histograms != 0 { + n += 1 + sovCortex(uint64(m.Histograms)) + } + if m.Exemplars != 0 { + n += 1 + sovCortex(uint64(m.Exemplars)) + } return n } @@ -2906,6 +2970,9 @@ func (this *WriteResponse) String() string { s := strings.Join([]string{`&WriteResponse{`, `Code:` + fmt.Sprintf("%v", this.Code) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Samples:` + fmt.Sprintf("%v", this.Samples) + `,`, + `Histograms:` + fmt.Sprintf("%v", this.Histograms) + `,`, + `Exemplars:` + fmt.Sprintf("%v", this.Exemplars) + `,`, `}`, }, "") return s @@ -3566,6 +3633,63 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + m.Samples = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Samples |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + m.Histograms = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Histograms |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + m.Exemplars = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Exemplars |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipCortex(dAtA[iNdEx:]) diff --git a/pkg/cortexpb/cortex.proto b/pkg/cortexpb/cortex.proto index e40b04439aa..fa2caf287c2 100644 --- a/pkg/cortexpb/cortex.proto +++ b/pkg/cortexpb/cortex.proto @@ -36,14 +36,12 @@ message StreamWriteRequest { message WriteResponse { int32 code = 1; string message = 2; -} -message WriteResponse { // Samples represents X-Prometheus-Remote-Write-Written-Samples - int64 Samples = 1; + int64 Samples = 3; // Histograms represents X-Prometheus-Remote-Write-Written-Histograms - int64 Histograms = 2; + int64 Histograms = 4; // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars - int64 Exemplars = 3; + int64 Exemplars = 5; } message TimeSeries { diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 9e8cd833d2d..60fbf8e1492 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -1179,13 +1179,14 @@ func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, time d.inflightClientRequests.Inc() defer d.inflightClientRequests.Dec() + var resp *cortexpb.WriteResponse if d.cfg.UseStreamPush { req := &cortexpb.WriteRequest{ Timeseries: timeseries, Metadata: metadata, Source: source, } - _, err = c.PushStreamConnection(ctx, req) + resp, err = c.PushStreamConnection(ctx, req) } else { req := cortexpb.PreallocWriteRequestFromPool() req.Timeseries = timeseries diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go index bbe3e8d489c..6ca47078073 100644 --- a/pkg/util/push/push.go +++ b/pkg/util/push/push.go @@ -119,7 +119,7 @@ func Handler(remoteWrite2Enabled bool, maxRecvMsgSize int, sourceIPs *middleware } if remoteWrite2Enabled { - // follow Prometheus https://github.com/prometheus/prometheus/blob/main/storage/remote/write_handler.go + // follow Prometheus 
https://github.com/prometheus/prometheus/blob/v3.3.1/storage/remote/write_handler.go#L121 contentType := r.Header.Get("Content-Type") if contentType == "" { contentType = appProtoContentType @@ -225,7 +225,7 @@ func convertV2RequestToV1(req *writev2.Request) (cortexpb.PreallocWriteRequest, } func shouldConvertV2Metadata(metadata writev2.Metadata) bool { - return !(metadata.HelpRef == 0 && metadata.UnitRef == 0 && metadata.Type == writev2.Metadata_METRIC_TYPE_UNSPECIFIED) + return !(metadata.HelpRef == 0 && metadata.UnitRef == 0 && metadata.Type == writev2.Metadata_METRIC_TYPE_UNSPECIFIED) //nolint:staticcheck } func convertV2ToV1Histograms(histograms []writev2.Histogram) []cortexpb.Histogram { From 0d572e5b9357fc73f525681678c9367983b355a6 Mon Sep 17 00:00:00 2001 From: SungJin1212 Date: Wed, 23 Jul 2025 14:00:30 +0900 Subject: [PATCH 5/6] Change to expose header at Distributor side Signed-off-by: SungJin1212 --- integration/e2e/util.go | 43 +++++++++++++++ integration/remote_write_v2_test.go | 86 +++++++++++++++++++++++++++-- pkg/distributor/distributor.go | 31 ++++------- pkg/distributor/write_stats.go | 62 --------------------- pkg/distributor/write_stats_test.go | 41 -------------- pkg/ingester/ingester.go | 8 +-- 6 files changed, 136 insertions(+), 135 deletions(-) delete mode 100644 pkg/distributor/write_stats.go delete mode 100644 pkg/distributor/write_stats_test.go diff --git a/integration/e2e/util.go b/integration/e2e/util.go index 95f71bc0d35..d416d698527 100644 --- a/integration/e2e/util.go +++ b/integration/e2e/util.go @@ -499,3 +499,46 @@ func GenerateSeriesV2(name string, ts time.Time, additionalLabels ...prompb.Labe return } + +func GenerateV2SeriesWithSamples( + name string, + startTime time.Time, + scrapeInterval time.Duration, + startValue int, + numSamples int, + additionalLabels ...prompb.Label, +) (symbols []string, series writev2.TimeSeries) { + tsMillis := TimeToMilliseconds(startTime) + durMillis := scrapeInterval.Milliseconds() + + st 
:= writev2.NewSymbolTable() + lbs := labels.Labels{{Name: labels.MetricName, Value: name}} + + for _, label := range additionalLabels { + lbs = append(lbs, labels.Label{ + Name: label.Name, + Value: label.Value, + }) + } + + startTMillis := tsMillis + samples := make([]writev2.Sample, numSamples) + for i := 0; i < numSamples; i++ { + scrapeJitter := rand.Int63n(10) + 1 // add a jitter to simulate real-world scenarios, refer to: https://github.com/prometheus/prometheus/issues/13213 + samples[i] = writev2.Sample{ + Timestamp: startTMillis + scrapeJitter, + Value: float64(i + startValue), + } + startTMillis += durMillis + } + + series = writev2.TimeSeries{ + LabelsRefs: st.SymbolizeLabels(lbs, nil), + Samples: samples, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_GAUGE, + }, + } + + return st.Symbols(), series +} diff --git a/integration/remote_write_v2_test.go b/integration/remote_write_v2_test.go index 4ebcc142077..eb02d51722c 100644 --- a/integration/remote_write_v2_test.go +++ b/integration/remote_write_v2_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/tsdb/tsdbutil" @@ -25,7 +26,7 @@ import ( func TestIngesterRollingUpdate(t *testing.T) { // Test ingester rolling update situation: when -distributor.remote-write2-enabled is true, and ingester uses the v1.19.0 image. - // Expected: remote write 2.0 push success, but response header values are set to "0". 
+ // Expected: remote write 2.0 push success const blockRangePeriod = 5 * time.Second ingesterImage := "quay.io/cortexproject/cortex:v1.19.0" @@ -97,7 +98,7 @@ func TestIngesterRollingUpdate(t *testing.T) { res, err := c.PushV2(symbols1, series) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) - testPushHeader(t, res.Header, "0", "0", "0") + testPushHeader(t, res.Header, "1", "0", "0") // sample result, err := c.Query("test_series", now) @@ -115,13 +116,13 @@ func TestIngesterRollingUpdate(t *testing.T) { res, err = c.PushV2(symbols2, histogramSeries) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) - testPushHeader(t, res.Header, "0", "0", "0") + testPushHeader(t, res.Header, "0", "1", "0") - symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"}) + symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, true, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"}) res, err = c.PushV2(symbols3, histogramFloatSeries) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) - testPushHeader(t, res.Header, "0", "0", "0") + testPushHeader(t, res.Header, "0", "1", "0") testHistogramTimestamp := now.Add(blockRangePeriod * 2) expectedHistogram := tsdbutil.GenerateTestHistogram(int64(histogramIdx)) @@ -138,6 +139,8 @@ func TestIngesterRollingUpdate(t *testing.T) { } func TestIngest_SenderSendPRW2_DistributorNotAllowPRW2(t *testing.T) { + // Test `-distributor.remote-write2-enabled=false` but the Sender pushes PRW2 + // Expected: status code is 200, but samples are not written. 
const blockRangePeriod = 5 * time.Second s, err := e2e.NewScenario(networkName) @@ -198,6 +201,11 @@ func TestIngest_SenderSendPRW2_DistributorNotAllowPRW2(t *testing.T) { res, err := c.PushV2(symbols1, series) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) + + // sample + result, err := c.Query("test_series", now) + require.NoError(t, err) + require.Empty(t, result) } func TestIngest(t *testing.T) { @@ -281,7 +289,8 @@ func TestIngest(t *testing.T) { require.Equal(t, 200, res.StatusCode) testPushHeader(t, res.Header, "0", "1", "0") - symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"}) + // float histogram + symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, true, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"}) res, err = c.PushV2(symbols3, histogramFloatSeries) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) @@ -383,6 +392,71 @@ func TestExemplar(t *testing.T) { require.Equal(t, 1, len(exemplars)) } +func Test_WriteStatWithReplication(t *testing.T) { + // Test `X-Prometheus-Remote-Write-Samples-Written` header value + // with the replication. + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. 
+ consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + "-ingester.max-exemplars": "100", + // Distributor. + "-distributor.replication-factor": "3", + "-distributor.remote-write2-enabled": "true", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // Start Cortex components. + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester1 := e2ecortex.NewIngester("ingester-1", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester2 := e2ecortex.NewIngester("ingester-2", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester3 := e2ecortex.NewIngester("ingester-3", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(distributor, ingester1, ingester2, ingester3)) + + // Wait until distributor have updated the ring. 
+ require.NoError(t, distributor.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"cortex_ring_members"}, e2e.WithLabelMatchers( + labels.MustNewMatcher(labels.MatchEqual, "name", "ingester"), + labels.MustNewMatcher(labels.MatchEqual, "state", "ACTIVE")))) + + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), "", "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + start := now.Add(-time.Minute * 10) + numSamples := 20 + scrapeInterval := 30 * time.Second + symbols, series := e2e.GenerateV2SeriesWithSamples("test_series", start, scrapeInterval, 0, numSamples, prompb.Label{Name: "job", Value: "test"}) + res, err := c.PushV2(symbols, []writev2.TimeSeries{series}) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "20", "0", "0") +} + func testPushHeader(t *testing.T, header http.Header, expectedSamples, expectedHistogram, expectedExemplars string) { require.Equal(t, expectedSamples, header.Get("X-Prometheus-Remote-Write-Samples-Written")) require.Equal(t, expectedHistogram, header.Get("X-Prometheus-Remote-Write-Histograms-Written")) diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 60fbf8e1492..b7df9bc2af1 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -818,18 +818,19 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co keys := append(seriesKeys, metadataKeys...) 
initialMetadataIndex := len(seriesKeys) - ws := WriteStats{} - - err = d.doBatch(ctx, req, subRing, keys, initialMetadataIndex, validatedMetadata, validatedTimeseries, userID, &ws) + err = d.doBatch(ctx, req, subRing, keys, initialMetadataIndex, validatedMetadata, validatedTimeseries, userID) if err != nil { return nil, err } resp := &cortexpb.WriteResponse{} if d.cfg.RemoteWrite2Enabled { - resp.Samples = ws.LoadSamples() - resp.Histograms = ws.LoadHistogram() - resp.Exemplars = ws.LoadExemplars() + // We simply expose validated samples, histograms, and exemplars + // to the header. We should improve it to expose the actual + // written values by the Ingesters. + resp.Samples = int64(validatedFloatSamples) + resp.Histograms = int64(validatedHistogramSamples) + resp.Exemplars = int64(validatedExemplars) } return resp, firstPartialErr @@ -894,7 +895,7 @@ func (d *Distributor) cleanStaleIngesterMetrics() { } } -func (d *Distributor) doBatch(ctx context.Context, req *cortexpb.WriteRequest, subRing ring.ReadRing, keys []uint32, initialMetadataIndex int, validatedMetadata []*cortexpb.MetricMetadata, validatedTimeseries []cortexpb.PreallocTimeseries, userID string, ws *WriteStats) error { +func (d *Distributor) doBatch(ctx context.Context, req *cortexpb.WriteRequest, subRing ring.ReadRing, keys []uint32, initialMetadataIndex int, validatedMetadata []*cortexpb.MetricMetadata, validatedTimeseries []cortexpb.PreallocTimeseries, userID string) error { span, _ := opentracing.StartSpanFromContext(ctx, "doBatch") defer span.Finish() @@ -929,7 +930,7 @@ func (d *Distributor) doBatch(ctx context.Context, req *cortexpb.WriteRequest, s } } - return d.send(localCtx, ingester, timeseries, metadata, req.Source, ws) + return d.send(localCtx, ingester, timeseries, metadata, req.Source) }, func() { cortexpb.ReuseSlice(req.Timeseries) req.Free() @@ -1163,7 +1164,7 @@ func sortLabelsIfNeeded(labels []cortexpb.LabelAdapter) { }) } -func (d *Distributor) send(ctx context.Context, ingester 
ring.InstanceDesc, timeseries []cortexpb.PreallocTimeseries, metadata []*cortexpb.MetricMetadata, source cortexpb.WriteRequest_SourceEnum, ws *WriteStats) error { +func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries []cortexpb.PreallocTimeseries, metadata []*cortexpb.MetricMetadata, source cortexpb.WriteRequest_SourceEnum) error { h, err := d.ingesterPool.GetClientFor(ingester.Addr) if err != nil { return err @@ -1179,21 +1180,20 @@ func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, time d.inflightClientRequests.Inc() defer d.inflightClientRequests.Dec() - var resp *cortexpb.WriteResponse if d.cfg.UseStreamPush { req := &cortexpb.WriteRequest{ Timeseries: timeseries, Metadata: metadata, Source: source, } - resp, err = c.PushStreamConnection(ctx, req) + _, err = c.PushStreamConnection(ctx, req) } else { req := cortexpb.PreallocWriteRequestFromPool() req.Timeseries = timeseries req.Metadata = metadata req.Source = source - resp, err = c.PushPreAlloc(ctx, req) + _, err = c.PushPreAlloc(ctx, req) // We should not reuse the req in case of errors: // See: https://github.com/grpc/grpc-go/issues/6355 @@ -1215,13 +1215,6 @@ func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, time } } - if resp != nil { - // track write stats - ws.SetSamples(resp.Samples) - ws.SetHistograms(resp.Histograms) - ws.SetExemplars(resp.Exemplars) - } - return err } diff --git a/pkg/distributor/write_stats.go b/pkg/distributor/write_stats.go deleted file mode 100644 index 0f7fbc332d0..00000000000 --- a/pkg/distributor/write_stats.go +++ /dev/null @@ -1,62 +0,0 @@ -package distributor - -import ( - "go.uber.org/atomic" -) - -type WriteStats struct { - // Samples represents X-Prometheus-Remote-Write-Written-Samples - Samples atomic.Int64 - // Histograms represents X-Prometheus-Remote-Write-Written-Histograms - Histograms atomic.Int64 - // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars - 
Exemplars atomic.Int64 -} - -func (w *WriteStats) SetSamples(samples int64) { - if w == nil { - return - } - - w.Samples.Store(samples) -} - -func (w *WriteStats) SetHistograms(histograms int64) { - if w == nil { - return - } - - w.Histograms.Store(histograms) -} - -func (w *WriteStats) SetExemplars(exemplars int64) { - if w == nil { - return - } - - w.Exemplars.Store(exemplars) -} - -func (w *WriteStats) LoadSamples() int64 { - if w == nil { - return 0 - } - - return w.Samples.Load() -} - -func (w *WriteStats) LoadHistogram() int64 { - if w == nil { - return 0 - } - - return w.Histograms.Load() -} - -func (w *WriteStats) LoadExemplars() int64 { - if w == nil { - return 0 - } - - return w.Exemplars.Load() -} diff --git a/pkg/distributor/write_stats_test.go b/pkg/distributor/write_stats_test.go deleted file mode 100644 index 523f16788fe..00000000000 --- a/pkg/distributor/write_stats_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package distributor - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_SetAndLoad(t *testing.T) { - ws := &WriteStats{} - - t.Run("Samples", func(t *testing.T) { - ws.SetSamples(3) - assert.Equal(t, int64(3), ws.LoadSamples()) - }) - t.Run("Histograms", func(t *testing.T) { - ws.SetHistograms(10) - assert.Equal(t, int64(10), ws.LoadHistogram()) - }) - t.Run("Exemplars", func(t *testing.T) { - ws.SetExemplars(2) - assert.Equal(t, int64(2), ws.LoadExemplars()) - }) -} - -func Test_NilReceiver(t *testing.T) { - var ws *WriteStats - - t.Run("Samples", func(t *testing.T) { - ws.SetSamples(3) - assert.Equal(t, int64(0), ws.LoadSamples()) - }) - t.Run("Histograms", func(t *testing.T) { - ws.SetHistograms(10) - assert.Equal(t, int64(0), ws.LoadHistogram()) - }) - t.Run("Exemplars", func(t *testing.T) { - ws.SetExemplars(2) - assert.Equal(t, int64(0), ws.LoadExemplars()) - }) -} diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 2ca25e9b6d3..c2dab4a54ec 100644 --- a/pkg/ingester/ingester.go +++ 
b/pkg/ingester/ingester.go @@ -1569,13 +1569,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte return &cortexpb.WriteResponse{}, httpgrpc.Errorf(code, "%s", wrapWithUser(firstPartialErr, userID).Error()) } - writeResponse := &cortexpb.WriteResponse{ - Samples: int64(succeededSamplesCount), - Histograms: int64(succeededHistogramsCount), - Exemplars: int64(succeededExemplarsCount), - } - - return writeResponse, nil + return &cortexpb.WriteResponse{}, nil } func (i *Ingester) PushStream(srv client.Ingester_PushStreamServer) error { From 486726100a10c17a2e87acb78203c5126e796cc6 Mon Sep 17 00:00:00 2001 From: SungJin1212 Date: Fri, 1 Aug 2025 21:12:41 +0900 Subject: [PATCH 6/6] get rebase Signed-off-by: SungJin1212 --- integration/e2e/util.go | 28 ++++++++++++---------------- pkg/util/push/push.go | 6 +++--- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/integration/e2e/util.go b/integration/e2e/util.go index d416d698527..a7c164fea3b 100644 --- a/integration/e2e/util.go +++ b/integration/e2e/util.go @@ -429,10 +429,10 @@ func GenerateHistogramSeriesV2(name string, ts time.Time, i uint32, floatHistogr tsMillis := TimeToMilliseconds(ts) st := writev2.NewSymbolTable() - - lbs := labels.Labels{labels.Label{Name: "__name__", Value: name}} + lb := labels.NewScratchBuilder(0) + lb.Add("__name__", name) for _, lbl := range additionalLabels { - lbs = append(lbs, labels.Label{Name: lbl.Name, Value: lbl.Value}) + lb.Add(lbl.Name, lbl.Value) } var ( @@ -450,7 +450,7 @@ func GenerateHistogramSeriesV2(name string, ts time.Time, i uint32, floatHistogr // Generate the series series = append(series, writev2.TimeSeries{ - LabelsRefs: st.SymbolizeLabels(lbs, nil), + LabelsRefs: st.SymbolizeLabels(lb.Labels(), nil), Histograms: []writev2.Histogram{ph}, }) @@ -464,17 +464,15 @@ func GenerateSeriesV2(name string, ts time.Time, additionalLabels ...prompb.Labe value := rand.Float64() st := writev2.NewSymbolTable() - lbs := 
labels.Labels{{Name: labels.MetricName, Value: name}} + lb := labels.NewScratchBuilder(0) + lb.Add("__name__", name) for _, label := range additionalLabels { - lbs = append(lbs, labels.Label{ - Name: label.Name, - Value: label.Value, - }) + lb.Add(label.Name, label.Value) } series = append(series, writev2.TimeSeries{ // Generate the series - LabelsRefs: st.SymbolizeLabels(lbs, nil), + LabelsRefs: st.SymbolizeLabels(lb.Labels(), nil), Samples: []writev2.Sample{ {Value: value, Timestamp: tsMillis}, }, @@ -512,13 +510,11 @@ func GenerateV2SeriesWithSamples( durMillis := scrapeInterval.Milliseconds() st := writev2.NewSymbolTable() - lbs := labels.Labels{{Name: labels.MetricName, Value: name}} + lb := labels.NewScratchBuilder(0) + lb.Add("__name__", name) for _, label := range additionalLabels { - lbs = append(lbs, labels.Label{ - Name: label.Name, - Value: label.Value, - }) + lb.Add(label.Name, label.Value) } startTMillis := tsMillis @@ -533,7 +529,7 @@ func GenerateV2SeriesWithSamples( } series = writev2.TimeSeries{ - LabelsRefs: st.SymbolizeLabels(lbs, nil), + LabelsRefs: st.SymbolizeLabels(lb.Labels(), nil), Samples: samples, Metadata: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_GAUGE, diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go index 6ca47078073..17413f0e4dd 100644 --- a/pkg/util/push/push.go +++ b/pkg/util/push/push.go @@ -11,7 +11,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" - "github.com/prometheus/prometheus/storage/remote" + "github.com/prometheus/prometheus/util/compression" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/middleware" @@ -140,8 +140,8 @@ func Handler(remoteWrite2Enabled bool, maxRecvMsgSize int, sourceIPs *middleware enc := r.Header.Get("Content-Encoding") if enc == "" { - } else if enc != string(remote.SnappyBlockCompression) { - err 
:= fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, remote.SnappyBlockCompression) + } else if enc != compression.Snappy { + err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, compression.Snappy) level.Error(logger).Log("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) return