From 2e4649755b8201a6d7928e7b1fc6242e8f3257c1 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Tue, 31 Jan 2023 09:41:49 -0800 Subject: [PATCH 1/2] Update thanos Signed-off-by: Alan Protasio --- go.mod | 32 +- go.sum | 97 +- .../go/compute/internal/version.go | 2 +- .../go/compute/metadata/CHANGES.md | 7 + .../go/compute/metadata/metadata.go | 1 + vendor/cloud.google.com/go/iam/CHANGES.md | 14 + .../go/iam/apiv1/iampb}/iam_policy.pb.go | 4 +- .../go/iam/apiv1/iampb}/options.pb.go | 4 +- .../go/iam/apiv1/iampb}/policy.pb.go | 76 +- .../google/pprof/profile/legacy_profile.go | 1 - .../github.com/google/pprof/profile/merge.go | 99 + .../gax-go/v2/.release-please-manifest.json | 2 +- .../googleapis/gax-go/v2/CHANGES.md | 8 + .../googleapis/gax-go/v2/apierror/apierror.go | 45 +- .../googleapis/gax-go/v2/internal/version.go | 2 +- vendor/github.com/grafana/regexp/README.md | 7 - vendor/github.com/grafana/regexp/backtrack.go | 26 +- vendor/github.com/grafana/regexp/exec.go | 71 +- vendor/github.com/grafana/regexp/onepass.go | 26 +- vendor/github.com/grafana/regexp/regexp.go | 40 +- .../github.com/grafana/regexp/syntax/prog.go | 25 +- .../prometheus/common/model/value.go | 264 ++- .../prometheus/common/model/value_float.go | 101 + .../common/model/value_histogram.go | 171 ++ .../prometheus/common/model/value_type.go | 83 + .../prometheus/discovery/file/file.go | 16 +- .../prometheus/model/labels/labels.go | 15 + .../prometheus/model/relabel/relabel.go | 29 +- .../prometheus/notifier/notifier.go | 38 - .../prometheus/prometheus/prompb/custom.go | 17 + .../prometheus/prometheus/promql/functions.go | 2 +- .../prometheus/promql/parser/parse.go | 2 +- .../prometheus/prometheus/promql/test.go | 4 +- .../prometheus/prometheus/rules/manager.go | 9 + .../prometheus/prometheus/scrape/scrape.go | 4 + .../prometheus/storage/interface.go | 3 +- .../prometheus/storage/remote/codec.go | 11 +- .../prometheus/storage/remote/read_handler.go | 4 + .../storage/remote/write_handler.go | 19 +- .../prometheus/prometheus/tsdb/compact.go | 2 +- .../prometheus/prometheus/tsdb/db.go | 6 +- .../prometheus/prometheus/tsdb/head_append.go | 10 +- .../prometheus/prometheus/tsdb/index/index.go | 68 +- .../prometheus/prometheus/tsdb/querier.go | 33 +- .../execution/function/functions.go | 9 + .../pkg/block/indexheader/binary_reader.go | 450 +++-- .../block/indexheader/lazy_binary_reader.go | 39 +- .../thanos-io/thanos/pkg/dedup/iter.go | 102 +- .../thanos/pkg/dedup/pushdown_iter.go | 5 +- .../thanos-io/thanos/pkg/gate/gate.go | 9 +- .../thanos-io/thanos/pkg/store/bucket.go | 29 +- .../thanos-io/thanos/pkg/store/proxy.go | 4 +- .../thanos-io/thanos/pkg/store/recover.go | 52 + .../thanos/pkg/store/storepb/custom.go | 28 + .../thanos/pkg/store/storepb/inprocess.go | 8 + .../pkg/store/storepb/prompb/types.pb.go | 1652 +++++++++++++++-- .../pkg/store/storepb/prompb/types.proto | 68 + .../thanos/pkg/store/storepb/types.pb.go | 78 +- .../thanos/pkg/store/storepb/types.proto | 1 + vendor/go.opencensus.io/Makefile | 8 +- vendor/go.opencensus.io/opencensus.go | 2 +- .../plugin/ocgrpc/client_metrics.go | 9 + .../plugin/ocgrpc/server_metrics.go | 9 + .../plugin/ocgrpc/stats_common.go | 23 +- .../go.opencensus.io/plugin/ochttp/server.go | 8 +- vendor/go.opencensus.io/stats/doc.go | 7 +- .../go.opencensus.io/stats/internal/record.go | 6 + vendor/go.opencensus.io/stats/record.go | 21 +- .../stats/view/aggregation.go | 6 +- .../go.opencensus.io/stats/view/collector.go | 9 +- 
vendor/go.opencensus.io/stats/view/doc.go | 2 +- vendor/go.opencensus.io/stats/view/worker.go | 27 +- vendor/go.opencensus.io/tag/profile_19.go | 1 + vendor/go.opencensus.io/tag/profile_not19.go | 1 + vendor/go.opencensus.io/trace/doc.go | 13 +- vendor/go.opencensus.io/trace/lrumap.go | 2 +- vendor/go.opencensus.io/trace/trace_go11.go | 1 + .../go.opencensus.io/trace/trace_nongo11.go | 1 + .../net/http/otelhttp/handler.go | 23 +- .../net/http/otelhttp/version.go | 2 +- .../instrument/asyncfloat64/asyncfloat64.go | 16 +- .../instrument/asyncint64/asyncint64.go | 16 +- .../instrument/syncfloat64/syncfloat64.go | 8 + .../metric/instrument/syncint64/syncint64.go | 8 + .../go.opentelemetry.io/otel/metric/meter.go | 4 + vendor/golang.org/x/exp/slices/slices.go | 11 +- vendor/golang.org/x/exp/slices/sort.go | 2 +- .../api/googleapi/googleapi.go | 12 + .../iamcredentials/v1/iamcredentials-gen.go | 24 +- .../api/internal/gensupport/error.go | 24 + .../google.golang.org/api/internal/version.go | 2 +- vendor/google.golang.org/api/option/option.go | 4 +- .../api/storage/v1/storage-gen.go | 284 +-- .../api/transport/grpc/dial.go | 48 +- .../api/transport/grpc/dial_socketopt.go | 6 +- .../googleapis/api/annotations/client.pb.go | 1558 +++++++++++++++- .../googleapis/api/launch_stage.pb.go | 203 ++ .../genproto/googleapis/iam/v1/alias.go | 208 +++ vendor/modules.txt | 34 +- 99 files changed, 5431 insertions(+), 1256 deletions(-) rename vendor/{google.golang.org/genproto/googleapis/iam/v1 => cloud.google.com/go/iam/apiv1/iampb}/iam_policy.pb.go (99%) rename vendor/{google.golang.org/genproto/googleapis/iam/v1 => cloud.google.com/go/iam/apiv1/iampb}/options.pb.go (99%) rename vendor/{google.golang.org/genproto/googleapis/iam/v1 => cloud.google.com/go/iam/apiv1/iampb}/policy.pb.go (94%) create mode 100644 vendor/github.com/prometheus/common/model/value_float.go create mode 100644 vendor/github.com/prometheus/common/model/value_histogram.go create mode 100644 vendor/github.com/prometheus/common/model/value_type.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/store/recover.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/error.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go diff --git a/go.mod b/go.mod index 1973920170a..bc8390bb66e 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 github.com/gorilla/mux v1.8.0 - github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/hashicorp/consul/api v1.18.0 github.com/hashicorp/go-cleanhttp v0.5.2 @@ -44,15 +44,15 @@ require ( github.com/prometheus/alertmanager v0.25.0 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.39.0 - github.com/prometheus/prometheus v0.40.7 + github.com/prometheus/common v0.39.1-0.20230110141620-846591a16635 + github.com/prometheus/prometheus v0.41.0 github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.3 github.com/stretchr/testify v1.8.1 - github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4 + 
github.com/thanos-community/promql-engine v0.0.0-20230124070417-9e293186b7e4 github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 - github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a + github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.6 @@ -77,9 +77,9 @@ require ( require ( cloud.google.com/go v0.105.0 // indirect - cloud.google.com/go/compute v1.12.1 // indirect - cloud.google.com/go/compute/metadata v0.2.1 // indirect - cloud.google.com/go/iam v0.6.0 // indirect + cloud.google.com/go/compute v1.13.0 // indirect + cloud.google.com/go/compute/metadata v0.2.2 // indirect + cloud.google.com/go/iam v0.8.0 // indirect cloud.google.com/go/storage v1.27.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 // indirect @@ -133,10 +133,10 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.0.1 // indirect github.com/google/go-cmp v0.5.9 // indirect - github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e // indirect + github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect - github.com/googleapis/gax-go/v2 v2.6.0 // indirect + github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -190,20 +190,20 @@ require ( github.com/weaveworks/promrus v1.2.0 // indirect github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 // indirect go.mongodb.org/mongo-driver v1.11.0 // indirect - go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 // indirect go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/ot v1.9.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect - go.opentelemetry.io/otel/metric v0.33.0 // indirect + go.opentelemetry.io/otel/metric v0.34.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/goleak v1.2.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 // indirect + golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/oauth2 v0.3.0 // indirect golang.org/x/sys v0.4.0 // indirect @@ -211,9 +211,9 @@ require ( golang.org/x/tools v0.4.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.12.0 // indirect - google.golang.org/api v0.102.0 // indirect + google.golang.org/api v0.104.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto 
v0.0.0-20221027153422-115e99e71e1c // indirect + google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/ini.v1 v1.66.6 // indirect diff --git a/go.sum b/go.sum index f841f040f93..0fe32354de1 100644 --- a/go.sum +++ b/go.sum @@ -47,17 +47,17 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k= +cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.6.0 h1:nsqQC88kT5Iwlm4MeNGTpfMWddp6NB/UOLFTH6m1QfQ= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/longrunning v0.1.1 h1:y50CXG4j0+qvEukslYFBCrzaXX0qpFbBzc3PchSu/LE= +cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -475,7 +475,7 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.10 h1:0frpeeoM9pHouHjhLeZDuDTJ0PqjDTrycaHaMmkJAo8= github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0= -github.com/digitalocean/godo v1.88.0 h1:SAEdw63xOMmzlwCeCWjLH1GcyDPUjbSAR1Bh7VELxzc= +github.com/digitalocean/godo v1.91.1 h1:1o30VOCu1aC6488qBd0SkQiBeAZ35RSTvLwCA1pQMhc= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -513,12 +513,12 @@ github.com/edsrzf/mmap-go v1.1.0 
h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/efficientgo/core v1.0.0-rc.2 h1:7j62qHLnrZqO3V3UA0AqOGd5d5aXV3AX6m/NZBHp78I= github.com/efficientgo/core v1.0.0-rc.2/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps= -github.com/efficientgo/e2e v0.13.1-0.20220923082810-8fa9daa8af8a h1:cnJajqeh/HjvJLhI3wPvWG9OQ4gU79+4pELRD5Pkih8= +github.com/efficientgo/e2e v0.14.1-0.20230119090947-fa7ceb0197c5 h1:N1fHVcNEPMJNB93sxT6icl5yvoFxa2sp3/1NnVlrAFc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -533,7 +533,7 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go. github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/envoyproxy/protoc-gen-validate v0.6.13 h1:TvDcILLkjuZV3ER58VkBmncKsLUBqBDxra/XctCzuMM= +github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= @@ -803,8 +803,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e h1:F1LLQqQ8WoIbyoxLUY+JUZe1kuHdxThM6CPUATzE6Io= -github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 h1:D5iJJZKAi0rU4e/5E58BkrnN+xeCDjAIqcm1GGxAGSI= +github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -822,11 +822,11 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k= +github.com/gophercloud/gophercloud v1.1.1 h1:MuGyqbSxiuVBqkPZ3+Nhbytk1xZxhmfCB2Rg1cJWFWM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= @@ -844,8 +844,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b h1:UlCBLaqvS4wVYNrMKSfqTBVsed/EOY9dnenhYZMUnrA= github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 h1:A3dhViTeFDSQcGOXuUi6ukCQSMyDtDISBp2z6OOo2YM= -github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -920,11 +920,11 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/nomad/api 
v0.0.0-20221102143410-8a95f1239005 h1:jKwXhVS4F7qk0g8laz+Anz0g/6yaSJ3HqmSAuSNLUcA= +github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d h1:kEWrUx7mld3c6HRcO2KhfD1MYBkofuZfEfDwCRQ9aMU= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow= +github.com/hetznercloud/hcloud-go v1.38.0 h1:K6Pd/mMdcLfBhvwG39qyAaacp4pCS3dKa8gChmLKxLg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= @@ -1288,7 +1288,7 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/oracle/oci-go-sdk/v65 v65.13.0 h1:0+9ea5goYfhI3/MPfbIQU6yzHYWE6sCk6VuUepxk5Nk= -github.com/ovh/go-ovh v1.1.0 h1:bHXZmw8nTgZin4Nv7JuaLs0KG5x54EQR7migYTd1zrk= +github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -1364,8 +1364,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.39.1-0.20230110141620-846591a16635 h1:hK3y58iUBjRFZ6kFNJTWsES1GnVKsqEYUeiyeRXridQ= +github.com/prometheus/common v0.39.1-0.20230110141620-846591a16635/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2 h1:sbJAfBXQFkG6sUkbwBun8MNdzW9+wd5YfPYofbmj0YM= @@ -1386,8 +1386,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/prometheus v0.40.7 h1:cYtp4YrR9M99YpTUfXbei/HjIJJ+En23NKsTCeZ2U2w= -github.com/prometheus/prometheus v0.40.7/go.mod h1:nO+vI0cJo1ezp2DPGw5NEnTlYHGRpBFrqE4zb9O0g0U= 
+github.com/prometheus/prometheus v0.41.0 h1:+QR4QpzwE54zsKk2K7EUkof3tHxa3b/fyw7xJ4jR1Ns= +github.com/prometheus/prometheus v0.41.0/go.mod h1:Uu5817xm7ibU/VaDZ9pu1ssGzcpO9Bd+LyoZ76RpHyo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1416,7 +1416,7 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10 h1:wsfMs0iv+MJiViM37qh5VEKISi3/ZUq2nNKNdqmumAs= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -1508,12 +1508,12 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4 h1:54iqf5p40TFGTc2Dp791P1/ncO2sTqvXdMLXqNpC/Gc= -github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4/go.mod h1:d52Wfzxs6L3xhc2snodyWvM2bZMMVn0XT2q4vsfBPVo= +github.com/thanos-community/promql-engine v0.0.0-20230124070417-9e293186b7e4 h1:D0ruzvS/U+49eMLg/7IIYK2iMdVP4PrJLpVHgguMVkg= +github.com/thanos-community/promql-engine v0.0.0-20230124070417-9e293186b7e4/go.mod h1:d52Wfzxs6L3xhc2snodyWvM2bZMMVn0XT2q4vsfBPVo= github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 h1:xUnLk2CwIoJyv6OB4MWC3aOH9TnneSgM5kgTsOmXIuI= github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06/go.mod h1:gdo4vwwonBnheHB/TCwAOUtKJKrLhLtbBVTQR9rN/v0= -github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a h1:oN3VupYNkavPRvdXwq71p54SAFSbOGvL0qL7CeKFrJ0= -github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a/go.mod h1:xZKr/xpbijYM8EqTMpurqgKItyIPFy1wqUd/DMTDu4M= +github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00 h1:8/GjzkmjltOUSO+7FZQhhpTj7SPDXZwY5bWassilNTo= +github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00/go.mod h1:oTvgV468yMvGgECD06eWuVWw0OBTHcznyT2BZl5rgeA= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod 
h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1609,14 +1609,15 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 h1:aUEBEdCa6iamGzg6fuYxDA8ThxvOG240mAvWDU+XLio= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4/go.mod h1:l2MdsbKTocpPS5nQZscqTR9jd8u96VYZdcpF8Sye7mA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK7Vyo6h0+X8BA4FpreZQTlVEIarnsBP/H5mzs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U= go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 h1:S1iBWYnf1iqK4O/qnjUhQ2MMNis/h5+LeB/51+uzGHI= go.opentelemetry.io/contrib/propagators/autoprop v0.34.0/go.mod h1:lJppyBme+d8vGNukA9sHdlKvw/q4i4c9JXx2RTpuHmM= go.opentelemetry.io/contrib/propagators/aws v1.12.0 h1:n3lNyZs2ytVEfFhcn2QO5Z80lgrMXyP1Ncx0C7rUU8A= @@ -1645,8 +1646,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKP go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v0.33.0 h1:xQAyl7uGEYvrLAiV/09iTJlp1pZnQ9Wl793qbVvED1E= -go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI= +go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= +go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= @@ -1732,8 +1733,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp 
v0.0.0-20221031165847-c99f073a8326 h1:QfTh0HpN6hlw6D3vu8DAwC8pBIwikq0AI1evdm+FksE= -golang.org/x/exp v0.0.0-20221031165847-c99f073a8326/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 h1:OvjRkcNHnf6/W5FZXSxODbxwD+X7fspczG7Jn/xQVD4= +golang.org/x/exp v0.0.0-20221212164502-fae10dda9338/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2222,8 +2223,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.104.0 h1:KBfmLRqdZEbwQleFlSLnzpQJwhjpmNOk4cKQIBDZ9mg= +google.golang.org/api v0.104.0/go.mod h1:JCspTXJbBxa5ySXw4UgUqVer7DfVxbvc/CTUFqAED5U= google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2329,8 +2330,8 @@ google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70= +google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2457,13 +2458,13 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= -k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ= +k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod 
h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= -k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc= +k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -2472,7 +2473,7 @@ k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= -k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0= +k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= @@ -2492,17 +2493,17 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.80.0 h1:lyJt0TWMPaGoODa8B8bUuxgHS3W/m/bNr2cca3brA/g= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= +k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0kuvf54v/hwpldiJt69w1s= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= +k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo= @@ -2536,7 +2537,7 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index 5ac4a843e11..efedadbea25 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.12.1" +const Version = "1.13.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 8631b6d6d2c..6e3ee8d6ab1 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + ## [0.1.0] (2022-10-26) Initial release of metadata being it's own module. 
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 50538b1d343..d4aad9bf395 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -70,6 +70,7 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, Timeout: 5 * time.Second, } diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index c4ead20e09e..ced217827b0 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,5 +1,19 @@ # Changes +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05) + + +### Features + +* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03) + + +### Features + +* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + ## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25) diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 6fbf54f4489..2793098aabc 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/iam/v1/iam_policy.proto -package iam +package iampb import ( context "context" diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index abea46d9bc1..835f2171998 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/iam/v1/options.proto -package iam +package iampb import ( reflect "reflect" diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go similarity index 94% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index 5869d92070e..ec7777a7687 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/iam/v1/policy.proto -package iam +package iampb import ( reflect "reflect" @@ -279,11 +279,11 @@ type Policy struct { // Any operation that affects conditional role bindings must specify version // `3`. This requirement applies to the following operations: // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a policy - // that includes conditions + // - Getting a policy that includes a conditional role binding + // - Adding a conditional role binding to a policy + // - Changing a conditional role binding in a policy + // - Removing any role binding, with or without a condition, from a policy + // that includes conditions // // **Important:** If you use IAM Conditions, you must include the `etag` field // whenever you call `setIamPolicy`. If you omit this field, then IAM allows @@ -396,47 +396,43 @@ type Binding struct { // Specifies the principals requesting access for a Cloud Platform resource. // `members` can have the following values: // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. + // - `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. // - // * `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. + // - `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. // - // * `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . + // - `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . // + // - `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. // - // * `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // - `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. // - // * `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. + // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus - // unique identifier) representing a service account that has been recently - // deleted. For example, - // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. 
- // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. + // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. // + // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. // + // - `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` // The condition that is associated with this binding. // diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go index 9ba9a77c924..8d07fd6c27c 100644 --- a/vendor/github.com/google/pprof/profile/legacy_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -865,7 +865,6 @@ func parseThread(b []byte) (*Profile, error) { // Recognize each thread and populate profile samples. for !isMemoryMapSentinel(line) { if strings.HasPrefix(line, "---- no stack trace for") { - line = "" break } if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 90c5723d78f..4b66282cb8e 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -566,3 +566,102 @@ func (lm locationIDMap) set(id uint64, loc *Location) { } lm.sparse[id] = loc } + +// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It +// keeps sample types that appear in all profiles only and drops/reorders the +// sample types as necessary. +// +// In the case of sample types order is not the same for given profiles the +// order is derived from the first profile. +// +// Profiles are modified in-place. +// +// It returns an error if the sample type's intersection is empty. 
+func CompatibilizeSampleTypes(ps []*Profile) error { + sTypes := commonSampleTypes(ps) + if len(sTypes) == 0 { + return fmt.Errorf("profiles have empty common sample type list") + } + for _, p := range ps { + if err := compatibilizeSampleTypes(p, sTypes); err != nil { + return err + } + } + return nil +} + +// commonSampleTypes returns sample types that appear in all profiles in the +// order how they ordered in the first profile. +func commonSampleTypes(ps []*Profile) []string { + if len(ps) == 0 { + return nil + } + sTypes := map[string]int{} + for _, p := range ps { + for _, st := range p.SampleType { + sTypes[st.Type]++ + } + } + var res []string + for _, st := range ps[0].SampleType { + if sTypes[st.Type] == len(ps) { + res = append(res, st.Type) + } + } + return res +} + +// compatibilizeSampleTypes drops sample types that are not present in sTypes +// list and reorder them if needed. +// +// It sets DefaultSampleType to sType[0] if it is not in sType list. +// +// It assumes that all sample types from the sTypes list are present in the +// given profile otherwise it returns an error. +func compatibilizeSampleTypes(p *Profile, sTypes []string) error { + if len(sTypes) == 0 { + return fmt.Errorf("sample type list is empty") + } + defaultSampleType := sTypes[0] + reMap, needToModify := make([]int, len(sTypes)), false + for i, st := range sTypes { + if st == p.DefaultSampleType { + defaultSampleType = p.DefaultSampleType + } + idx := searchValueType(p.SampleType, st) + if idx < 0 { + return fmt.Errorf("%q sample type is not found in profile", st) + } + reMap[i] = idx + if idx != i { + needToModify = true + } + } + if !needToModify && len(sTypes) == len(p.SampleType) { + return nil + } + p.DefaultSampleType = defaultSampleType + oldSampleTypes := p.SampleType + p.SampleType = make([]*ValueType, len(sTypes)) + for i, idx := range reMap { + p.SampleType[i] = oldSampleTypes[idx] + } + values := make([]int64, len(sTypes)) + for _, s := range p.Sample { + for i, idx := range reMap { + values[i] = s.Value[idx] + } + s.Value = s.Value[:len(values)] + copy(s.Value, values) + } + return nil +} + +func searchValueType(vts []*ValueType, s string) int { + for i, vt := range vts { + if vt.Type == s { + return i + } + } + return -1 +} diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 7ea75bfdc37..d88960b7ef1 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.6.0" + "v2": "2.7.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index fb8463a2cb1..b75170f2227 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,13 @@ # Changelog +## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) + + +### Features + +* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) +* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) + ## 
[2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index 787a619e373..aa6be1304f1 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -233,30 +233,49 @@ func (a *APIError) Metadata() map[string]string { } -// FromError parses a Status error or a googleapi.Error and builds an APIError. -func FromError(err error) (*APIError, bool) { - if err == nil { - return nil, false - } - - ae := APIError{err: err} +// setDetailsFromError parses a Status error or a googleapi.Error +// and sets status and details or httpErr and details, respectively. +// It returns false if neither Status nor googleapi.Error can be parsed. +func (a *APIError) setDetailsFromError(err error) bool { st, isStatus := status.FromError(err) var herr *googleapi.Error isHTTPErr := errors.As(err, &herr) switch { case isStatus: - ae.status = st - ae.details = parseDetails(st.Details()) + a.status = st + a.details = parseDetails(st.Details()) case isHTTPErr: - ae.httpErr = herr - ae.details = parseHTTPDetails(herr) + a.httpErr = herr + a.details = parseHTTPDetails(herr) default: - return nil, false + return false } + return true +} - return &ae, true +// FromError parses a Status error or a googleapi.Error and builds an +// APIError, wrapping the provided error in the new APIError. It +// returns false if neither Status nor googleapi.Error can be parsed. +func FromError(err error) (*APIError, bool) { + return ParseError(err, true) +} +// ParseError parses a Status error or a googleapi.Error and builds an +// APIError. If wrap is true, it wraps the error in the new APIError. +// It returns false if neither Status nor googleapi.Error can be parsed. +func ParseError(err error, wrap bool) (*APIError, bool) { + if err == nil { + return nil, false + } + ae := APIError{} + if wrap { + ae = APIError{err: err} + } + if !ae.setDetailsFromError(err) { + return nil, false + } + return &ae, true } // parseDetails accepts a slice of interface{} that should be backed by some diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 065312e827c..0ba5da1dd1e 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "2.6.0" +const Version = "2.7.0" diff --git a/vendor/github.com/grafana/regexp/README.md b/vendor/github.com/grafana/regexp/README.md index ff6e7fccd1b..756e60dcfdb 100644 --- a/vendor/github.com/grafana/regexp/README.md +++ b/vendor/github.com/grafana/regexp/README.md @@ -10,10 +10,3 @@ The `main` branch is non-optimised: switch over to [`speedup`](https://github.co ## Benchmarks: ![image](https://user-images.githubusercontent.com/8125524/152182951-856549ed-6044-4285-b799-69b31f598e32.png) - -## Links to upstream changes: -* [regexp: allow patterns with no alternates to be one-pass](https://go-review.googlesource.com/c/go/+/353711) -* [regexp: speed up onepass prefix check](https://go-review.googlesource.com/c/go/+/354909) -* [regexp: handle prefix string with fold-case](https://go-review.googlesource.com/c/go/+/358756) -* [regexp: avoid copying each instruction executed](https://go-review.googlesource.com/c/go/+/355789) -* [regexp: allow prefix string anchored at beginning](https://go-review.googlesource.com/c/go/+/377294) diff --git a/vendor/github.com/grafana/regexp/backtrack.go b/vendor/github.com/grafana/regexp/backtrack.go index a8111a7eb8e..0739f5ff588 100644 --- a/vendor/github.com/grafana/regexp/backtrack.go +++ b/vendor/github.com/grafana/regexp/backtrack.go @@ -15,9 +15,8 @@ package regexp import ( + "regexp/syntax" "sync" - - "github.com/grafana/regexp/syntax" ) // A job is an entry on the backtracker's job stack. It holds @@ -337,19 +336,9 @@ func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []in // but we are not clearing visited between calls to TrySearch, // so no work is duplicated and it ends up still being linear. width := -1 - for pos <= end && width != 0 { - if len(b.cap) > 0 { - b.cap[0] = pos - } - if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { - // Match must be leftmost; done. - goto Match - } - _, width = i.step(pos) - pos += width - + for ; pos <= end && width != 0; pos += width { if len(re.prefix) > 0 { - // Match requires literal prefix; fast search for next occurrence. + // Match requires literal prefix; fast search for it. advance := i.index(re, pos) if advance < 0 { freeBitState(b) @@ -357,6 +346,15 @@ func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []in } pos += advance } + + if len(b.cap) > 0 { + b.cap[0] = pos + } + if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { + // Match must be leftmost; done. + goto Match + } + _, width = i.step(pos) } freeBitState(b) return nil diff --git a/vendor/github.com/grafana/regexp/exec.go b/vendor/github.com/grafana/regexp/exec.go index 11a5eebfe25..3fc4b684feb 100644 --- a/vendor/github.com/grafana/regexp/exec.go +++ b/vendor/github.com/grafana/regexp/exec.go @@ -6,9 +6,8 @@ package regexp import ( "io" + "regexp/syntax" "sync" - - "github.com/grafana/regexp/syntax" ) // A queue is a 'sparse array' holding pending threads of execution. @@ -205,8 +204,6 @@ func (m *machine) match(i input, pos int) bool { // Have match; finished exploring alternatives. break } - // Note we don't check foldCase here, because Unicode folding is complicated; - // just let it fall through to EqualFold on the whole string. if len(m.re.prefix) > 0 && r1 != m.re.prefixRune && i.canCheckPrefix() { // Match requires literal prefix; fast search for it. 
advance := i.index(m.re, pos) @@ -404,46 +401,45 @@ func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap in } m := newOnePassMachine() - matched := false - i, _ := m.inputs.init(ir, ib, is) - - r, r1 := endOfText, endOfText - width, width1 := 0, 0 - var flag lazyFlag - var pc int - var inst *onePassInst - - // If there is a simple literal prefix, skip over it. - if pos == 0 && len(re.prefix) > 0 && i.canCheckPrefix() { - // Match requires literal prefix; fast search for it. - if !i.hasPrefix(re) { - goto Return - } - pos += len(re.prefix) - pc = int(re.prefixEnd) - } else { - pc = re.onepass.Start - } - if cap(m.matchcap) < ncap { m.matchcap = make([]int, ncap) } else { m.matchcap = m.matchcap[:ncap] } + matched := false for i := range m.matchcap { m.matchcap[i] = -1 } + i, _ := m.inputs.init(ir, ib, is) + + r, r1 := endOfText, endOfText + width, width1 := 0, 0 r, width = i.step(pos) + if r != endOfText { + r1, width1 = i.step(pos + width) + } + var flag lazyFlag if pos == 0 { flag = newLazyFlag(-1, r) } else { flag = i.context(pos) } + pc := re.onepass.Start + inst := &re.onepass.Inst[pc] // If there is a simple literal prefix, skip over it. - if r != endOfText { + if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) && + len(re.prefix) > 0 && i.canCheckPrefix() { + // Match requires literal prefix; fast search for it. + if !i.hasPrefix(re) { + goto Return + } + pos += len(re.prefix) + r, width = i.step(pos) r1, width1 = i.step(pos + width) + flag = i.context(pos) + pc = int(re.prefixEnd) } for { inst = &re.onepass.Inst[pc] @@ -532,29 +528,6 @@ func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap i return nil } - // Check prefix match before allocating data structures - if len(re.prefix) > 0 && r == nil { - if re.cond&syntax.EmptyBeginText != 0 { // anchored - if b != nil && !(&inputBytes{str: b[pos:]}).hasPrefix(re) { - return nil - } - if s != "" && !(&inputString{str: s[pos:]}).hasPrefix(re) { - return nil - } - } else { // non-anchored - var advance int - if b != nil { - advance = (&inputBytes{str: b}).index(re, pos) - } else { - advance = (&inputString{str: s}).index(re, pos) - } - if advance < 0 { - return nil - } - pos += advance - } - } - if re.onepass != nil { return re.doOnePass(r, b, s, pos, ncap, dstCap) } diff --git a/vendor/github.com/grafana/regexp/onepass.go b/vendor/github.com/grafana/regexp/onepass.go index d3613bceaf7..bc47f4c4a83 100644 --- a/vendor/github.com/grafana/regexp/onepass.go +++ b/vendor/github.com/grafana/regexp/onepass.go @@ -5,12 +5,11 @@ package regexp import ( + "regexp/syntax" "sort" "strings" "unicode" "unicode/utf8" - - "github.com/grafana/regexp/syntax" ) // "One-pass" regexp execution. @@ -39,10 +38,10 @@ type onePassInst struct { // is the entire match. Pc is the index of the last rune instruction // in the string. The OnePassPrefix skips over the mandatory // EmptyBeginText -func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, foldCase bool, pc uint32) { +func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { i := &p.Inst[p.Start] if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 { - return "", i.Op == syntax.InstMatch, false, uint32(p.Start) + return "", i.Op == syntax.InstMatch, uint32(p.Start) } pc = i.Out i = &p.Inst[pc] @@ -52,13 +51,12 @@ func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, foldCase bool, } // Avoid allocation of buffer if prefix is empty. 
if iop(i) != syntax.InstRune || len(i.Rune) != 1 { - return "", i.Op == syntax.InstMatch, false, uint32(p.Start) + return "", i.Op == syntax.InstMatch, uint32(p.Start) } - foldCase = (syntax.Flags(i.Arg)&syntax.FoldCase != 0) // Have prefix; gather characters. var buf strings.Builder - for iop(i) == syntax.InstRune && len(i.Rune) == 1 && (syntax.Flags(i.Arg)&syntax.FoldCase != 0) == foldCase && i.Rune[0] != utf8.RuneError { + for iop(i) == syntax.InstRune && len(i.Rune) == 1 && syntax.Flags(i.Arg)&syntax.FoldCase == 0 && i.Rune[0] != utf8.RuneError { buf.WriteRune(i.Rune[0]) pc, i = i.Out, &p.Inst[i.Out] } @@ -67,7 +65,7 @@ func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, foldCase bool, p.Inst[i.Out].Op == syntax.InstMatch { complete = true } - return buf.String(), complete, foldCase, pc + return buf.String(), complete, pc } // OnePassNext selects the next actionable state of the prog, based on the input character. @@ -474,20 +472,12 @@ func compileOnePass(prog *syntax.Prog) (p *onePassProg) { syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText { return nil } - hasAlt := false - for _, inst := range prog.Inst { - if inst.Op == syntax.InstAlt || inst.Op == syntax.InstAltMatch { - hasAlt = true - break - } - } - // If we have alternates, every instruction leading to InstMatch must be EmptyEndText. - // Also, any match on empty text must be $. + // every instruction leading to InstMatch must be EmptyEndText for _, inst := range prog.Inst { opOut := prog.Inst[inst.Out].Op switch inst.Op { default: - if opOut == syntax.InstMatch && hasAlt { + if opOut == syntax.InstMatch { return nil } case syntax.InstAlt, syntax.InstAltMatch: diff --git a/vendor/github.com/grafana/regexp/regexp.go b/vendor/github.com/grafana/regexp/regexp.go index 1479e373174..7958a397285 100644 --- a/vendor/github.com/grafana/regexp/regexp.go +++ b/vendor/github.com/grafana/regexp/regexp.go @@ -72,13 +72,12 @@ package regexp import ( "bytes" "io" + "regexp/syntax" "strconv" "strings" "sync" "unicode" "unicode/utf8" - - "github.com/grafana/regexp/syntax" ) // Regexp is the representation of a compiled regular expression. 
@@ -95,7 +94,6 @@ type Regexp struct { prefixBytes []byte // prefix, as a []byte prefixRune rune // first rune in prefix prefixEnd uint32 // pc for last rune in prefix - prefixFoldCase bool // check prefix case-insensitive mpool int // pool for machines matchcap int // size of recorded match lengths prefixComplete bool // prefix is the entire regexp @@ -201,10 +199,10 @@ func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) { minInputLen: minInputLen(re), } if regexp.onepass == nil { - regexp.prefix, regexp.prefixComplete, regexp.prefixFoldCase = prog.PrefixAndCase() + regexp.prefix, regexp.prefixComplete = prog.Prefix() regexp.maxBitStateLen = maxBitStateLen(prog) } else { - regexp.prefix, regexp.prefixComplete, regexp.prefixFoldCase, regexp.prefixEnd = onePassPrefix(prog) + regexp.prefix, regexp.prefixComplete, regexp.prefixEnd = onePassPrefix(prog) } if regexp.prefix != "" { // TODO(rsc): Remove this allocation by adding @@ -406,26 +404,10 @@ func (i *inputString) canCheckPrefix() bool { } func (i *inputString) hasPrefix(re *Regexp) bool { - if re.prefixFoldCase { - n := len(re.prefix) - return len(i.str) >= n && strings.EqualFold(i.str[0:n], re.prefix) - } return strings.HasPrefix(i.str, re.prefix) } func (i *inputString) index(re *Regexp, pos int) int { - if re.prefixFoldCase { - n := len(re.prefix) - // Brute-force compare at every position; could replace with sophisticated algo like strings.Index - for p := pos; p+n <= len(i.str); { - if strings.EqualFold(i.str[p:p+n], re.prefix) { - return p - pos - } - _, w := i.step(p) - p += w - } - return -1 - } return strings.Index(i.str[pos:], re.prefix) } @@ -469,26 +451,10 @@ func (i *inputBytes) canCheckPrefix() bool { } func (i *inputBytes) hasPrefix(re *Regexp) bool { - if re.prefixFoldCase { - n := len(re.prefixBytes) - return len(i.str) >= n && bytes.EqualFold(i.str[0:n], re.prefixBytes) - } return bytes.HasPrefix(i.str, re.prefixBytes) } func (i *inputBytes) index(re *Regexp, pos int) int { - if re.prefixFoldCase { - n := len(re.prefixBytes) - // Brute-force compare at every position; could replace with sophisticated algo like bytes.Index - for p := pos; p+n <= len(i.str); { - if bytes.EqualFold(i.str[p:p+n], re.prefixBytes) { - return p - pos - } - _, w := i.step(p) - p += w - } - return -1 - } return bytes.Index(i.str[pos:], re.prefixBytes) } diff --git a/vendor/github.com/grafana/regexp/syntax/prog.go b/vendor/github.com/grafana/regexp/syntax/prog.go index 2bf1dcaac02..896cdc42c2a 100644 --- a/vendor/github.com/grafana/regexp/syntax/prog.go +++ b/vendor/github.com/grafana/regexp/syntax/prog.go @@ -146,37 +146,20 @@ func (i *Inst) op() InstOp { // regexp must start with. Complete is true if the prefix // is the entire match. func (p *Prog) Prefix() (prefix string, complete bool) { - prefix, complete, foldCase := p.PrefixAndCase() - if foldCase { - return "", false - } - return prefix, complete -} - -// Prefix returns a literal string that all matches for the -// regexp must start with. Complete is true if the prefix -// is the entire match. FoldCase is true if the string should -// match in upper or lower case. -func (p *Prog) PrefixAndCase() (prefix string, complete bool, foldCase bool) { - i := &p.Inst[p.Start] - // Skip any no-op, capturing or begin-text instructions - for i.Op == InstNop || i.Op == InstCapture || (i.Op == InstEmptyWidth && EmptyOp(i.Arg)&EmptyBeginText != 0) { - i = &p.Inst[i.Out] - } + i := p.skipNop(uint32(p.Start)) // Avoid allocation of buffer if prefix is empty. 
if i.op() != InstRune || len(i.Rune) != 1 { - return "", i.Op == InstMatch, false + return "", i.Op == InstMatch } // Have prefix; gather characters. var buf strings.Builder - foldCase = (Flags(i.Arg)&FoldCase != 0) - for i.op() == InstRune && len(i.Rune) == 1 && (Flags(i.Arg)&FoldCase != 0) == foldCase && i.Rune[0] != utf8.RuneError { + for i.op() == InstRune && len(i.Rune) == 1 && Flags(i.Arg)&FoldCase == 0 && i.Rune[0] != utf8.RuneError { buf.WriteRune(i.Rune[0]) i = p.skipNop(i.Out) } - return buf.String(), i.Op == InstMatch, foldCase + return buf.String(), i.Op == InstMatch } // StartCond returns the leading empty-width conditions that must diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9d8fb1a283..543399e36e6 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -16,20 +16,12 @@ package model import ( "encoding/json" "fmt" - "math" "sort" "strconv" "strings" ) var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - // ZeroSample is the pseudo zero-value of Sample used to signal a // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, // and metric nil. Note that the natural zero value of Sample has a timestamp @@ -38,82 +30,14 @@ var ( ZeroSample = Sample{Timestamp: Earliest} ) -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. 
The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. +// Sample is a sample pair associated with a metric. A single sample must either +// define Value or Histogram but not both. Histogram == nil implies the Value +// field is used, otherwise it should be ignored. type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` + Histogram *SampleHistogram `json:"histogram"` } // Equal compares first the metrics, then the timestamp, then the value. The @@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - + if s.Histogram != nil { + return s.Histogram.Equal(o.Histogram) + } return s.Value.Equal(o.Value) } func (s Sample) String() string { + if s.Histogram != nil { + return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: *s.Histogram, + }) + } return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ Timestamp: s.Timestamp, Value: s.Value, @@ -142,6 +74,19 @@ func (s Sample) String() string { // MarshalJSON implements json.Marshaler. func (s Sample) MarshalJSON() ([]byte, error) { + if s.Histogram != nil { + v := struct { + Metric Metric `json:"metric"` + Histogram SampleHistogramPair `json:"histogram"` + }{ + Metric: s.Metric, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: *s.Histogram, + }, + } + return json.Marshal(&v) + } v := struct { Metric Metric `json:"metric"` Value SamplePair `json:"value"` @@ -152,21 +97,43 @@ func (s Sample) MarshalJSON() ([]byte, error) { Value: s.Value, }, } - return json.Marshal(&v) } +type sampleHistogramPairPtr struct { + Timestamp Time + Histogram *SampleHistogram +} + +func (s *sampleHistogramPairPtr) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + // UnmarshalJSON implements json.Unmarshaler. 
+// TODO: simplify and remove the need for both sampleHistogramPairPtr and SampleHistogramPair func (s *Sample) UnmarshalJSON(b []byte) error { v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + Histogram sampleHistogramPairPtr `json:"histogram"` }{ Metric: s.Metric, Value: SamplePair{ Timestamp: s.Timestamp, Value: s.Value, }, + Histogram: sampleHistogramPairPtr{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, } if err := json.Unmarshal(b, &v); err != nil { @@ -174,8 +141,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error { } s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value + if v.Histogram.Histogram != nil { + s.Timestamp = v.Histogram.Timestamp + s.Histogram = v.Histogram.Histogram + } else { + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + } return nil } @@ -221,80 +193,76 @@ func (s Samples) Equal(o Samples) bool { // SampleStream is a stream of Values belonging to an attached COWMetric. type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` } func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) + valuesLength := len(ss.Values) + vals := make([]string, valuesLength+len(ss.Histograms)) for i, v := range ss.Values { vals[i] = v.String() } + for i, v := range ss.Histograms { + vals[i+valuesLength] = v.String() + } return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) } -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string +func (ss SampleStream) MarshalJSON() ([]byte, error) { + if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else if len(ss.Histograms) > 0 { + v := struct { + Metric Metric `json:"metric"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + }{ + Metric: ss.Metric, + Values: ss.Values, + } + return json.Marshal(&v) + } } -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. 
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} +func (ss *SampleStream) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { + if err := json.Unmarshal(b, &v); err != nil { return err } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") + ss.Metric = v.Metric + ss.Values = v.Values + ss.Histograms = v.Histograms + + return nil } // Scalar is a scalar value evaluated at the set timestamp. diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go new file mode 100644 index 00000000000..9266fb9854c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -0,0 +1,101 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "strconv" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. 
+func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go new file mode 100644 index 00000000000..a4be0bca311 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -0,0 +1,171 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type FloatString float64 + +func (v FloatString) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +func (v FloatString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *FloatString) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("float value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = FloatString(f) + return nil +} + +type HistogramBucket struct { + Boundaries int + Lower FloatString + Upper FloatString + Count FloatString +} + +func (s HistogramBucket) MarshalJSON() ([]byte, error) { + b, err := json.Marshal(s.Boundaries) + if err != nil { + return nil, err + } + l, err := json.Marshal(s.Lower) + if err != nil { + return nil, err + } + u, err := json.Marshal(s.Upper) + if err != nil { + return nil, err + } + c, err := json.Marshal(s.Count) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil +} + +func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s *HistogramBucket) Equal(o *HistogramBucket) bool { + return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) +} + +func (b HistogramBucket) String() string { + var sb strings.Builder + lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 + upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + if lowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if upperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +type HistogramBuckets []*HistogramBucket + +func (s HistogramBuckets) Equal(o HistogramBuckets) bool { + if len(s) != len(o) { + return false + } + + for i, bucket := range s { + if !bucket.Equal(o[i]) { + return false + } + } + return true +} + +type SampleHistogram struct { + Count FloatString `json:"count"` + Sum FloatString `json:"sum"` + Buckets HistogramBuckets `json:"buckets"` +} + +func (s SampleHistogram) String() string { + return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) +} + +func (s *SampleHistogram) Equal(o *SampleHistogram) bool { + return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) +} + +type SampleHistogramPair struct { + Timestamp Time + Histogram SampleHistogram +} + +func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Histogram) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s SampleHistogramPair) 
String() string { + return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) +} + +func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { + return s == o || (s.Histogram.Equal(&o.Histogram) && s.Timestamp.Equal(o.Timestamp)) +} diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go new file mode 100644 index 00000000000..726c50ee638 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -0,0 +1,83 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" +) + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. +func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/file/file.go b/vendor/github.com/prometheus/prometheus/discovery/file/file.go index 5aa7f2e5f43..c45595c6ddc 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/file/file.go +++ b/vendor/github.com/prometheus/prometheus/discovery/file/file.go @@ -39,18 +39,23 @@ import ( ) var ( + fileSDReadErrorsCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_file_read_errors_total", + Help: "The number of File-SD read errors.", + }) fileSDScanDuration = prometheus.NewSummary( prometheus.SummaryOpts{ Name: "prometheus_sd_file_scan_duration_seconds", Help: "The duration of the File-SD scan in seconds.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) - fileSDReadErrorsCount = prometheus.NewCounter( + fileSDTimeStamp = NewTimestampCollector() + fileWatcherErrorsCount = prometheus.NewCounter( prometheus.CounterOpts{ - Name: "prometheus_sd_file_read_errors_total", - Help: "The number of File-SD read errors.", + Name: 
"prometheus_sd_file_watcher_errors_total", + Help: "The number of File-SD errors caused by filesystem watch failures.", }) - fileSDTimeStamp = NewTimestampCollector() patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`) @@ -62,7 +67,7 @@ var ( func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(fileSDScanDuration, fileSDReadErrorsCount, fileSDTimeStamp) + prometheus.MustRegister(fileSDReadErrorsCount, fileSDScanDuration, fileSDTimeStamp, fileWatcherErrorsCount) } // SDConfig is the configuration for file based discovery. @@ -237,6 +242,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { watcher, err := fsnotify.NewWatcher() if err != nil { level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) + fileWatcherErrorsCount.Inc() return } d.watcher = watcher diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 48237bdc0ee..aafba218aa9 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -20,6 +20,7 @@ import ( "strconv" "github.com/cespare/xxhash/v2" + "github.com/prometheus/common/model" ) // Well-known label names used by Prometheus components. @@ -134,6 +135,7 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels { } // Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. func (ls Labels) Hash() uint64 { // Use xxhash.Sum64(b) for fast path as it's faster. b := make([]byte, 0, 1024) @@ -311,6 +313,19 @@ func (ls Labels) WithoutEmpty() Labels { return ls } +// IsValid checks if the metric name or label names are valid. +func (ls Labels) IsValid() bool { + for _, l := range ls { + if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { + return false + } + if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return false + } + } + return true +} + // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { if len(ls) != len(o) { diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index e0d7f6ddf54..c731f6e0d3b 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -45,6 +45,10 @@ const ( Keep Action = "keep" // Drop drops targets for which the input does match the regex. Drop Action = "drop" + // KeepEqual drops targets for which the input does not match the target. + KeepEqual Action = "keepequal" + // Drop drops targets for which the input does match the target. + DropEqual Action = "dropequal" // HashMod sets a label to the modulus of a hash of labels. HashMod Action = "hashmod" // LabelMap copies labels to other labelnames based on a regex. 
@@ -66,7 +70,7 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } switch act := Action(strings.ToLower(s)); act { - case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep, Lowercase, Uppercase: + case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep, Lowercase, Uppercase, KeepEqual, DropEqual: *a = act return nil } @@ -109,13 +113,13 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Modulus == 0 && c.Action == HashMod { return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus") } - if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase) && c.TargetLabel == "" { + if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" { return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) } - if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase) && !relabelTarget.MatchString(c.TargetLabel) { + if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !relabelTarget.MatchString(c.TargetLabel) { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } - if (c.Action == Lowercase || c.Action == Uppercase) && c.Replacement != DefaultRelabelConfig.Replacement { + if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement { return fmt.Errorf("'replacement' can not be set for %s action", c.Action) } if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) { @@ -125,6 +129,15 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } + if c.Action == DropEqual || c.Action == KeepEqual { + if c.Regex != DefaultRelabelConfig.Regex || + c.Modulus != DefaultRelabelConfig.Modulus || + c.Separator != DefaultRelabelConfig.Separator || + c.Replacement != DefaultRelabelConfig.Replacement { + return fmt.Errorf("%s action requires only 'source_labels' and `target_label`, and no other fields", c.Action) + } + } + if c.Action == LabelDrop || c.Action == LabelKeep { if c.SourceLabels != nil || c.TargetLabel != DefaultRelabelConfig.TargetLabel || @@ -225,6 +238,14 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels if !cfg.Regex.MatchString(val) { return nil } + case DropEqual: + if lset.Get(cfg.TargetLabel) == val { + return nil + } + case KeepEqual: + if lset.Get(cfg.TargetLabel) != val { + return nil + } case Replace: indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. 
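Note on the relabel.go hunks above: the Prometheus bump pulls in the new keepequal and dropequal actions, which compare the joined source_labels value against an existing target_label instead of matching a regex, and whose validation (shown above) requires regex, modulus, separator and replacement to stay at their defaults. Below is a minimal, illustrative Go sketch of exercising the new actions through relabel.Process; it is not part of this patch, the label names __meta_port and __port and their values are made up for the example, and it assumes the Process signature of this Prometheus version (returning nil when the target is dropped).

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	// Start from the package defaults so separator, regex and replacement keep
	// their default values, as the validation for keepequal/dropequal expects.
	cfg := relabel.DefaultRelabelConfig
	cfg.SourceLabels = model.LabelNames{"__meta_port"} // hypothetical source label
	cfg.TargetLabel = "__port"                         // hypothetical target label
	cfg.Action = relabel.KeepEqual

	// Kept: the joined source value equals the target label's value.
	kept := relabel.Process(labels.FromStrings("__meta_port", "9090", "__port", "9090"), &cfg)
	// Dropped: the values differ, so Process returns nil in this version.
	dropped := relabel.Process(labels.FromStrings("__meta_port", "9090", "__port", "8080"), &cfg)

	fmt.Println(kept != nil, dropped != nil) // true false
}

Swapping cfg.Action to relabel.DropEqual inverts the behaviour: targets whose source value equals the target label are dropped instead of kept.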
diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 99886bd4bc3..fd89a029c7d 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -19,11 +19,9 @@ import ( "encoding/json" "fmt" "io" - "net" "net/http" "net/url" "path" - "strings" "sync" "time" @@ -727,47 +725,11 @@ func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig continue } - lb := labels.NewBuilder(lset) - - // addPort checks whether we should add a default port to the address. - // If the address is not valid, we don't append a port either. - addPort := func(s string) bool { - // If we can split, a port exists and we don't have to add one. - if _, _, err := net.SplitHostPort(s); err == nil { - return false - } - // If adding a port makes it valid, the previous error - // was not due to an invalid address and we can append a port. - _, _, err := net.SplitHostPort(s + ":1234") - return err == nil - } addr := lset.Get(model.AddressLabel) - // If it's an address with no trailing port, infer it based on the used scheme. - if addPort(addr) { - // Addresses reaching this point are already wrapped in [] if necessary. - switch lset.Get(model.SchemeLabel) { - case "http", "": - addr = addr + ":80" - case "https": - addr = addr + ":443" - default: - return nil, nil, fmt.Errorf("invalid scheme: %q", cfg.Scheme) - } - lb.Set(model.AddressLabel, addr) - } - if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return nil, nil, err } - // Meta labels are deleted after relabelling. Other internal labels propagate to - // the target which decides whether they will be part of their label set. - for _, l := range lset { - if strings.HasPrefix(l.Name, model.MetaLabelPrefix) { - lb.Del(l.Name) - } - } - res = append(res, alertmanagerLabels{lset}) } return res, droppedAlertManagers, nil diff --git a/vendor/github.com/prometheus/prometheus/prompb/custom.go b/vendor/github.com/prometheus/prometheus/prompb/custom.go index 0b3820c4d28..4b07187bd28 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/custom.go +++ b/vendor/github.com/prometheus/prometheus/prompb/custom.go @@ -13,5 +13,22 @@ package prompb +import ( + "sync" +) + func (m Sample) T() int64 { return m.Timestamp } func (m Sample) V() float64 { return m.Value } + +func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { + size := r.Size() + data, ok := p.Get().(*[]byte) + if ok && cap(*data) >= size { + n, err := r.MarshalToSizedBuffer((*data)[:size]) + if err != nil { + return nil, err + } + return (*data)[:n], nil + } + return r.Marshal() +} diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 8e9e6f96548..d481cb7358b 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -302,7 +302,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode // The trend factor argument. tf := vals[2].(Vector)[0].V - // Sanity check the input. + // Check that the input parameters are valid. if sf <= 0 || sf >= 1 { panic(fmt.Errorf("invalid smoothing factor. 
Expected: 0 < sf < 1, got: %f", sf)) } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go index 7a0e48464be..6c37ce6fc6a 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go @@ -432,7 +432,7 @@ func (p *parser) expectType(node Node, want ValueType, context string) { } } -// checkAST checks the sanity of the provided AST. This includes type checking. +// checkAST checks the validity of the provided AST. This includes type checking. func (p *parser) checkAST(node Node) (typ ValueType) { // For expressions the type is determined by their Type function. // Lists do not have a type but are not invalid either. diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go index 41526ccdc5f..ab2a159c4ee 100644 --- a/vendor/github.com/prometheus/prometheus/promql/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/test.go @@ -491,8 +491,8 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa }) if containsNonStepInvariant { - // Since there is a step invariant function, we cannot automatically - // generate step invariant test cases for it sanely. + // Expression contains a function whose result can vary with evaluation + // time, even though its arguments are step invariant: skip it. return nil, nil } diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index ced61ea4925..42f1b59ce0a 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -673,6 +673,9 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { rule.SetLastError(err) sp.SetStatus(codes.Error, err.Error()) unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } switch { case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): numOutOfOrder++ @@ -700,6 +703,9 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // Series no longer exposed, mark it stale. _, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN)) unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } switch { case unwrappedErr == nil: case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): @@ -727,6 +733,9 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { // Rule that produced series no longer configured, mark it stale. 
_, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN)) unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } switch { case unwrappedErr == nil: case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 04375ab564b..e9d172a3e8a 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -1609,6 +1609,10 @@ loop: err = errNameLabelMandatory break loop } + if !lset.IsValid() { + err = fmt.Errorf("invalid metric name or label names: %s", lset.String()) + break loop + } // If any label limits is exceeded the scrape should fail. if err = verifyLabelLimits(lset, sl.labelLimits); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 9df4b75f5be..22d3b418602 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -248,7 +248,8 @@ type GetRef interface { // Returns reference number that can be used to pass to Appender.Append(), // and a set of labels that will not cause another copy when passed to Appender.Append(). // 0 means the appender does not have a reference to this series. - GetRef(lset labels.Labels) (SeriesRef, labels.Labels) + // hash should be a hash of lset. + GetRef(lset labels.Labels, hash uint64) (SeriesRef, labels.Labels) } // ExemplarAppender provides an interface for adding samples to exemplar storage, which diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index 9b6f5a5ab18..48c2d8615f8 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -20,6 +20,7 @@ import ( "net/http" "sort" "strings" + "sync" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" @@ -193,6 +194,7 @@ func StreamChunkedReadResponses( ss storage.ChunkSeriesSet, sortedExternalLabels []prompb.Label, maxBytesInFrame int, + marshalPool *sync.Pool, ) (storage.Warnings, error) { var ( chks []prompb.Chunk @@ -234,12 +236,14 @@ func StreamChunkedReadResponses( continue } - b, err := proto.Marshal(&prompb.ChunkedReadResponse{ + resp := &prompb.ChunkedReadResponse{ ChunkedSeries: []*prompb.ChunkedSeries{ {Labels: lbls, Chunks: chks}, }, QueryIndex: queryIndex, - }) + } + + b, err := resp.PooledMarshal(marshalPool) if err != nil { return ss.Warnings(), fmt.Errorf("marshal ChunkedReadResponse: %w", err) } @@ -247,6 +251,9 @@ func StreamChunkedReadResponses( if _, err := stream.Write(b); err != nil { return ss.Warnings(), fmt.Errorf("write to stream: %w", err) } + + // We immediately flush the Write() so it is safe to return to the pool. 
+ marshalPool.Put(&b) chks = chks[:0] } if err := iter.Err(); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go index e1f1df21c19..116eb9596c4 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go @@ -17,6 +17,7 @@ import ( "context" "net/http" "sort" + "sync" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -37,6 +38,7 @@ type readHandler struct { remoteReadMaxBytesInFrame int remoteReadGate *gate.Gate queries prometheus.Gauge + marshalPool *sync.Pool } // NewReadHandler creates a http.Handler that accepts remote read requests and @@ -49,6 +51,7 @@ func NewReadHandler(logger log.Logger, r prometheus.Registerer, queryable storag remoteReadSampleLimit: remoteReadSampleLimit, remoteReadGate: gate.New(remoteReadConcurrencyLimit), remoteReadMaxBytesInFrame: remoteReadMaxBytesInFrame, + marshalPool: &sync.Pool{}, queries: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "prometheus", @@ -225,6 +228,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re querier.Select(true, hints, filteredMatchers...), sortedExternalLabels, h.remoteReadMaxBytesInFrame, + h.marshalPool, ) if err != nil { return err diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index ad1f0f3ae1b..7a2a9951cf6 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -67,11 +67,14 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // checkAppendExemplarError modifies the AppendExamplar's returned error based on the error cause. 
func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error { - unwrapedErr := errors.Unwrap(err) + unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } switch { - case errors.Is(unwrapedErr, storage.ErrNotFound): + case errors.Is(unwrappedErr, storage.ErrNotFound): return storage.ErrNotFound - case errors.Is(unwrapedErr, storage.ErrOutOfOrderExemplar): + case errors.Is(unwrappedErr, storage.ErrOutOfOrderExemplar): *outOfOrderErrs++ level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e)) return nil @@ -98,8 +101,11 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err for _, s := range ts.Samples { _, err = app.Append(0, labels, s.Timestamp, s.Value) if err != nil { - unwrapedErr := errors.Unwrap(err) - if errors.Is(unwrapedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrapedErr, storage.ErrOutOfBounds) || errors.Is(unwrapedErr, storage.ErrDuplicateSampleForTimestamp) { + unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } + if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) { level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) } return err @@ -123,6 +129,9 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err _, err = app.AppendHistogram(0, labels, hp.Timestamp, hs) if err != nil { unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } // Althogh AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is // a note indicating its inclusion in the future. if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 08fd27a310f..9fe50fda1dd 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -727,7 +727,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, } all = indexr.SortedPostings(all) // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. - sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) + sets = append(sets, newBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) syms := indexr.Symbols() if i == 0 { symbols = syms diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index def4442f520..7a165e431b1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -66,7 +66,7 @@ const ( // ErrNotReady is returned if the underlying storage is not ready yet. var ErrNotReady = errors.New("TSDB not ready") -// DefaultOptions used for the DB. They are sane for setups using +// DefaultOptions used for the DB. They are reasonable for setups using // millisecond precision timestamps. 
func DefaultOptions() *Options { return &Options{ @@ -998,9 +998,9 @@ type dbAppender struct { var _ storage.GetRef = dbAppender{} -func (a dbAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { +func (a dbAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) { if g, ok := a.Appender.(storage.GetRef); ok { - return g.GetRef(lset) + return g.GetRef(lset, hash) } return 0, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index a3b5027ea49..d5003188d2c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -98,9 +98,9 @@ func (h *Head) initTime(t int64) { h.maxTime.CompareAndSwap(math.MinInt64, t) } -func (a *initAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { +func (a *initAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) { if g, ok := a.app.(storage.GetRef); ok { - return g.GetRef(lset) + return g.GetRef(lset, hash) } return 0, nil } @@ -440,7 +440,7 @@ func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { } // AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't -// use getOrCreate or make any of the lset sanity checks that Append does. +// use getOrCreate or make any of the lset validity checks that Append does. func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { // Check if exemplar storage is enabled. if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 { @@ -647,8 +647,8 @@ func checkHistogramBuckets(buckets []int64) (uint64, error) { var _ storage.GetRef = &headAppender{} -func (a *headAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { - s := a.head.series.getByHash(lset.Hash(), lset) +func (a *headAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) { + s := a.head.series.getByHash(hash, lset) if s == nil { return 0, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index 06a0f3e71ee..9d7897860bd 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -1164,44 +1164,37 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { // Earlier V1 formats don't have a sorted postings offset table, so // load the whole offset table into memory. r.postingsV1 = map[string]map[string]uint64{} - if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, off uint64, _ int) error { - if len(key) != 2 { - return errors.Errorf("unexpected key length for posting table %d", len(key)) + if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, off uint64, _ int) error { + if _, ok := r.postingsV1[string(name)]; !ok { + r.postingsV1[string(name)] = map[string]uint64{} + r.postings[string(name)] = nil // Used to get a list of labelnames in places. } - if _, ok := r.postingsV1[key[0]]; !ok { - r.postingsV1[key[0]] = map[string]uint64{} - r.postings[key[0]] = nil // Used to get a list of labelnames in places. 
- } - r.postingsV1[key[0]][key[1]] = off + r.postingsV1[string(name)][string(value)] = off return nil }); err != nil { return nil, errors.Wrap(err, "read postings table") } } else { - var lastKey []string + var lastName, lastValue []byte lastOff := 0 valueCount := 0 // For the postings offset table we keep every label name but only every nth // label value (plus the first and last one), to save memory. - if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, _ uint64, off int) error { - if len(key) != 2 { - return errors.Errorf("unexpected key length for posting table %d", len(key)) - } - if _, ok := r.postings[key[0]]; !ok { + if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, _ uint64, off int) error { + if _, ok := r.postings[string(name)]; !ok { // Next label name. - r.postings[key[0]] = []postingOffset{} - if lastKey != nil { + r.postings[string(name)] = []postingOffset{} + if lastName != nil { // Always include last value for each label name. - r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff}) + r.postings[string(lastName)] = append(r.postings[string(lastName)], postingOffset{value: string(lastValue), off: lastOff}) } - lastKey = nil valueCount = 0 } if valueCount%symbolFactor == 0 { - r.postings[key[0]] = append(r.postings[key[0]], postingOffset{value: key[1], off: off}) - lastKey = nil + r.postings[string(name)] = append(r.postings[string(name)], postingOffset{value: string(value), off: off}) + lastName, lastValue = nil, nil } else { - lastKey = key + lastName, lastValue = name, value lastOff = off } valueCount++ @@ -1209,8 +1202,8 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { }); err != nil { return nil, errors.Wrap(err, "read postings table") } - if lastKey != nil { - r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff}) + if lastName != nil { + r.postings[string(lastName)] = append(r.postings[string(lastName)], postingOffset{value: string(lastValue), off: lastOff}) } // Trim any extra space in the slices. for k, v := range r.postings { @@ -1251,15 +1244,12 @@ type Range struct { // for all postings lists. func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) { m := map[labels.Label]Range{} - if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, off uint64, _ int) error { - if len(key) != 2 { - return errors.Errorf("unexpected key length for posting table %d", len(key)) - } + if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, off uint64, _ int) error { d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable) if d.Err() != nil { return d.Err() } - m[labels.Label{Name: key[0], Value: key[1]}] = Range{ + m[labels.Label{Name: string(name), Value: string(value)}] = Range{ Start: int64(off) + 4, End: int64(off) + 4 + int64(d.Len()), } @@ -1412,29 +1402,29 @@ func (s *symbolsIter) Next() bool { func (s symbolsIter) At() string { return s.cur } func (s symbolsIter) Err() error { return s.err } -// ReadOffsetTable reads an offset table and at the given position calls f for each -// found entry. If f returns an error it stops decoding and returns the received error. -func ReadOffsetTable(bs ByteSlice, off uint64, f func([]string, uint64, int) error) error { +// ReadPostingsOffsetTable reads the postings offset table and at the given position calls f for each +// found entry. 
+// The name and value parameters passed to f reuse the backing memory of the underlying byte slice, +// so they shouldn't be persisted without previously copying them. +// If f returns an error it stops decoding and returns the received error. +func ReadPostingsOffsetTable(bs ByteSlice, off uint64, f func(name, value []byte, postingsOffset uint64, labelOffset int) error) error { d := encoding.NewDecbufAt(bs, int(off), castagnoliTable) startLen := d.Len() cnt := d.Be32() for d.Err() == nil && d.Len() > 0 && cnt > 0 { offsetPos := startLen - d.Len() - keyCount := d.Uvarint() - // The Postings offset table takes only 2 keys per entry (name and value of label), - // and the LabelIndices offset table takes only 1 key per entry (a label name). - // Hence setting the size to max of both, i.e. 2. - keys := make([]string, 0, 2) - for i := 0; i < keyCount; i++ { - keys = append(keys, d.UvarintStr()) + if keyCount := d.Uvarint(); keyCount != 2 { + return errors.Errorf("unexpected number of keys for postings offset table %d", keyCount) } + name := d.UvarintBytes() + value := d.UvarintBytes() o := d.Uvarint64() if d.Err() != nil { break } - if err := f(keys, o, offsetPos); err != nil { + if err := f(name, value, o, offsetPos); err != nil { return err } cnt-- diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 0f9b4d3b47d..cc765903c00 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -19,6 +19,7 @@ import ( "strings" "unicode/utf8" + "github.com/oklog/ulid" "github.com/pkg/errors" "golang.org/x/exp/slices" @@ -47,6 +48,7 @@ func init() { } type blockBaseQuerier struct { + blockID ulid.ULID index IndexReader chunks ChunkReader tombstones tombstones.Reader @@ -77,6 +79,7 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er tombsr = tombstones.NewMemTombstones() } return &blockBaseQuerier{ + blockID: b.Meta().ULID, mint: mint, maxt: maxt, index: indexr, @@ -178,7 +181,7 @@ func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, if sortSeries { p = q.index.SortedPostings(p) } - return newBlockChunkSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) + return newBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) } func findSetMatches(pattern string) []string { @@ -427,6 +430,7 @@ func labelNamesWithMatchers(r IndexReader, matchers ...*labels.Matcher) ([]strin // Iterated series are trimmed with given min and max time as well as tombstones. // See newBlockSeriesSet and newBlockChunkSeriesSet to use it for either sample or chunk iterating. type blockBaseSeriesSet struct { + blockID ulid.ULID p index.Postings index IndexReader chunks ChunkReader @@ -471,7 +475,14 @@ func (b *blockBaseSeriesSet) Next() bool { var trimFront, trimBack bool // Copy chunks as iterables are reusable. - chks := make([]chunks.Meta, 0, len(b.bufChks)) + // Count those in range to size allocation (roughly - ignoring tombstones). + nChks := 0 + for _, chk := range b.bufChks { + if !(chk.MaxTime < b.mint || chk.MinTime > b.maxt) { + nChks++ + } + } + chks := make([]chunks.Meta, 0, nChks) // Prefilter chunks and pick those which are not entirely deleted or totally outside of the requested range. 
for _, chk := range b.bufChks { @@ -481,10 +492,10 @@ func (b *blockBaseSeriesSet) Next() bool { if chk.MinTime > b.maxt { continue } - - if !(tombstones.Interval{Mint: chk.MinTime, Maxt: chk.MaxTime}.IsSubrange(intervals)) { - chks = append(chks, chk) + if (tombstones.Interval{Mint: chk.MinTime, Maxt: chk.MaxTime}.IsSubrange(intervals)) { + continue } + chks = append(chks, chk) // If still not entirely deleted, check if trim is needed based on requested time range. if !b.disableTrimming { @@ -512,7 +523,7 @@ func (b *blockBaseSeriesSet) Next() bool { copy(b.currLabels, b.bufLbls) b.currIterFn = func() *populateWithDelGenericSeriesIterator { - return newPopulateWithDelGenericSeriesIterator(b.chunks, chks, intervals) + return newPopulateWithDelGenericSeriesIterator(b.blockID, b.chunks, chks, intervals) } return true } @@ -539,7 +550,8 @@ func (b *blockBaseSeriesSet) Warnings() storage.Warnings { return nil } // means that the chunk iterator in currChkMeta is invalid and a chunk rewrite // is needed, for which currDelIter should be used. type populateWithDelGenericSeriesIterator struct { - chunks ChunkReader + blockID ulid.ULID + chunks ChunkReader // chks are expected to be sorted by minTime and should be related to // the same, single series. chks []chunks.Meta @@ -554,11 +566,13 @@ type populateWithDelGenericSeriesIterator struct { } func newPopulateWithDelGenericSeriesIterator( + blockID ulid.ULID, chunks ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals, ) *populateWithDelGenericSeriesIterator { return &populateWithDelGenericSeriesIterator{ + blockID: blockID, chunks: chunks, chks: chks, i: -1, @@ -577,7 +591,7 @@ func (p *populateWithDelGenericSeriesIterator) next() bool { p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta) if p.err != nil { - p.err = errors.Wrapf(p.err, "cannot populate chunk %d", p.currChkMeta.Ref) + p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currChkMeta.Ref, p.blockID.String()) return false } @@ -837,9 +851,10 @@ type blockChunkSeriesSet struct { blockBaseSeriesSet } -func newBlockChunkSeriesSet(i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet { +func newBlockChunkSeriesSet(id ulid.ULID, i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet { return &blockChunkSeriesSet{ blockBaseSeriesSet{ + blockID: id, index: i, chunks: c, tombstones: t, diff --git a/vendor/github.com/thanos-community/promql-engine/execution/function/functions.go b/vendor/github.com/thanos-community/promql-engine/execution/function/functions.go index 1229f2da2d4..4a887a3d2aa 100644 --- a/vendor/github.com/thanos-community/promql-engine/execution/function/functions.go +++ b/vendor/github.com/thanos-community/promql-engine/execution/function/functions.go @@ -72,6 +72,15 @@ var Funcs = map[string]FunctionCall{ "deg": simpleFunc(func(v float64) float64 { return v * 180 / math.Pi }), + "sgn": simpleFunc(func(v float64) float64 { + var sign float64 + if v > 0 { + sign = 1 + } else if v < 0 { + sign = -1 + } + return sign + }), "timestamp": func(f FunctionArgs) promql.Sample { return promql.Sample{ Point: promql.Point{ diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go index c62cf48335b..0c7d062c9c3 100644 --- 
a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go @@ -5,6 +5,7 @@ package indexheader import ( "bufio" + "bytes" "context" "encoding/binary" "hash" @@ -69,57 +70,64 @@ type BinaryTOC struct { PostingsOffsetTable uint64 } -// WriteBinary build index-header file from the pieces of index in object storage. -func WriteBinary(ctx context.Context, bkt objstore.BucketReader, id ulid.ULID, filename string) (err error) { +// WriteBinary build index header from the pieces of index in object storage, and cached in file if necessary. +func WriteBinary(ctx context.Context, bkt objstore.BucketReader, id ulid.ULID, filename string) ([]byte, error) { ir, indexVersion, err := newChunkedIndexReader(ctx, bkt, id) if err != nil { - return errors.Wrap(err, "new index reader") + return nil, errors.Wrap(err, "new index reader") + } + tmpFilename := "" + if filename != "" { + tmpFilename = filename + ".tmp" } - tmpFilename := filename + ".tmp" // Buffer for copying and encbuffers. // This also will control the size of file writer buffer. buf := make([]byte, 32*1024) - bw, err := newBinaryWriter(tmpFilename, buf) + bw, err := newBinaryWriter(id, tmpFilename, buf) if err != nil { - return errors.Wrap(err, "new binary index header writer") + return nil, errors.Wrap(err, "new binary index header writer") } defer runutil.CloseWithErrCapture(&err, bw, "close binary writer for %s", tmpFilename) if err := bw.AddIndexMeta(indexVersion, ir.toc.PostingsTable); err != nil { - return errors.Wrap(err, "add index meta") + return nil, errors.Wrap(err, "add index meta") } if err := ir.CopySymbols(bw.SymbolsWriter(), buf); err != nil { - return err + return nil, err } - if err := bw.f.Flush(); err != nil { - return errors.Wrap(err, "flush") + if err := bw.writer.Flush(); err != nil { + return nil, errors.Wrap(err, "flush") } if err := ir.CopyPostingsOffsets(bw.PostingOffsetsWriter(), buf); err != nil { - return err + return nil, err } - if err := bw.f.Flush(); err != nil { - return errors.Wrap(err, "flush") + if err := bw.writer.Flush(); err != nil { + return nil, errors.Wrap(err, "flush") } if err := bw.WriteTOC(); err != nil { - return errors.Wrap(err, "write index header TOC") + return nil, errors.Wrap(err, "write index header TOC") } - if err := bw.f.Flush(); err != nil { - return errors.Wrap(err, "flush") + if err := bw.writer.Flush(); err != nil { + return nil, errors.Wrap(err, "flush") } - if err := bw.f.f.Sync(); err != nil { - return errors.Wrap(err, "sync") + if err := bw.writer.Sync(); err != nil { + return nil, errors.Wrap(err, "sync") } - // Create index-header in atomic way, to avoid partial writes (e.g during restart or crash of store GW). - return os.Rename(tmpFilename, filename) + if tmpFilename != "" { + // Create index-header in atomic way, to avoid partial writes (e.g during restart or crash of store GW). + return nil, os.Rename(tmpFilename, filename) + } + + return bw.Buffer(), nil } type chunkedIndexReader struct { @@ -231,7 +239,7 @@ func (r *chunkedIndexReader) CopyPostingsOffsets(w io.Writer, buf []byte) (err e // TODO(bwplotka): Add padding for efficient read. 
type binaryWriter struct { - f *FileWriter + writer PosWriter toc BinaryTOC @@ -241,38 +249,48 @@ type binaryWriter struct { crc32 hash.Hash } -func newBinaryWriter(fn string, buf []byte) (w *binaryWriter, err error) { - dir := filepath.Dir(fn) - - df, err := fileutil.OpenDir(dir) - if os.IsNotExist(err) { - if err := os.MkdirAll(dir, os.ModePerm); err != nil { - return nil, err - } - df, err = fileutil.OpenDir(dir) - } +func newBinaryWriter(id ulid.ULID, cacheFilename string, buf []byte) (w *binaryWriter, err error) { + var memoryWriter *MemoryWriter + memoryWriter, err = NewMemoryWriter(id, len(buf)) if err != nil { - return nil, err } + var binWriter PosWriter = memoryWriter - defer runutil.CloseWithErrCapture(&err, df, "dir close") + if cacheFilename != "" { + dir := filepath.Dir(cacheFilename) - if err := os.RemoveAll(fn); err != nil { - return nil, errors.Wrap(err, "remove any existing index at path") - } + df, err := fileutil.OpenDir(dir) + if os.IsNotExist(err) { + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return nil, err + } + df, err = fileutil.OpenDir(dir) + } + if err != nil { + return nil, err + } - // We use file writer for buffers not larger than reused one. - f, err := NewFileWriter(fn, len(buf)) - if err != nil { - return nil, err - } - if err := df.Sync(); err != nil { - return nil, errors.Wrap(err, "sync dir") + defer runutil.CloseWithErrCapture(&err, df, "dir close") + + if err := os.RemoveAll(cacheFilename); err != nil { + return nil, errors.Wrap(err, "remove any existing index at path") + } + + // We use file writer for buffers not larger than reused one. + var fileWriter *FileWriter + fileWriter, err = NewFileWriter(cacheFilename, memoryWriter) + if err != nil { + return nil, err + } + if err := df.Sync(); err != nil { + return nil, errors.Wrap(err, "sync dir") + } + binWriter = fileWriter } w = &binaryWriter{ - f: f, + writer: binWriter, // Reusable memory. buf: encoding.Encbuf{B: buf}, @@ -283,38 +301,42 @@ func newBinaryWriter(fn string, buf []byte) (w *binaryWriter, err error) { w.buf.PutBE32(MagicIndex) w.buf.PutByte(BinaryFormatV1) - return w, w.f.Write(w.buf.Get()) + return w, w.writer.Write(w.buf.Get()) } -type FileWriter struct { - f *os.File - fbuf *bufio.Writer - pos uint64 - name string +type PosWriter interface { + Pos() uint64 + Write(bufs ...[]byte) error + Buffer() []byte + Flush() error + Sync() error + Close() error +} + +type MemoryWriter struct { + id ulid.ULID + buf bytes.Buffer + pos uint64 } // TODO(bwplotka): Added size to method, upstream this. -func NewFileWriter(name string, size int) (*FileWriter, error) { - f, err := os.OpenFile(filepath.Clean(name), os.O_CREATE|os.O_RDWR, 0600) - if err != nil { - return nil, err - } - return &FileWriter{ - f: f, - fbuf: bufio.NewWriterSize(f, size), - pos: 0, - name: name, +func NewMemoryWriter(id ulid.ULID, size int) (*MemoryWriter, error) { + var buf bytes.Buffer + return &MemoryWriter{ + id: id, + buf: buf, + pos: 0, }, nil } -func (fw *FileWriter) Pos() uint64 { - return fw.pos +func (mw *MemoryWriter) Pos() uint64 { + return mw.pos } -func (fw *FileWriter) Write(bufs ...[]byte) error { +func (mw *MemoryWriter) Write(bufs ...[]byte) error { for _, b := range bufs { - n, err := fw.fbuf.Write(b) - fw.pos += uint64(n) + n, err := mw.buf.Write(b) + mw.pos += uint64(n) if err != nil { return err } @@ -322,40 +344,83 @@ func (fw *FileWriter) Write(bufs ...[]byte) error { // offset references in v1 are only 4 bytes large. 
// Once we move to compressed/varint representations in those areas, this limitation // can be lifted. - if fw.pos > 16*math.MaxUint32 { - return errors.Errorf("%q exceeding max size of 64GiB", fw.name) + if mw.pos > 16*math.MaxUint32 { + return errors.Errorf("%q exceeding max size of 64GiB", mw.id) } } return nil } -func (fw *FileWriter) Flush() error { - return fw.fbuf.Flush() +func (mw *MemoryWriter) Buffer() []byte { + return mw.buf.Bytes() } -func (fw *FileWriter) WriteAt(buf []byte, pos uint64) error { - if err := fw.Flush(); err != nil { - return err - } - _, err := fw.f.WriteAt(buf, int64(pos)) - return err +func (mw *MemoryWriter) Flush() error { + return nil } -// AddPadding adds zero byte padding until the file size is a multiple size. -func (fw *FileWriter) AddPadding(size int) error { - p := fw.pos % uint64(size) - if p == 0 { - return nil +func (mw *MemoryWriter) Sync() error { + return nil +} + +func (mw *MemoryWriter) Close() error { + return mw.Flush() +} + +type FileWriter struct { + f *os.File + memWriter *MemoryWriter + fileWriter *bufio.Writer + name string +} + +// TODO(bwplotka): Added size to method, upstream this. +func NewFileWriter(name string, memWriter *MemoryWriter) (*FileWriter, error) { + f, err := os.OpenFile(filepath.Clean(name), os.O_CREATE|os.O_RDWR, 0600) + if err != nil { + return nil, err } - p = uint64(size) - p + return &FileWriter{ + f: f, + memWriter: memWriter, + fileWriter: bufio.NewWriterSize(f, memWriter.buf.Len()), + name: name, + }, nil +} + +func (fw *FileWriter) Pos() uint64 { + return fw.memWriter.Pos() +} - if err := fw.Write(make([]byte, p)); err != nil { - return errors.Wrap(err, "add padding") +func (fw *FileWriter) Write(bufs ...[]byte) error { + if err := fw.memWriter.Write(bufs...); err != nil { + return err + } + for _, b := range bufs { + _, err := fw.fileWriter.Write(b) + if err != nil { + return err + } } return nil } +func (fw *FileWriter) Buffer() []byte { + return fw.memWriter.Buffer() +} + +func (fw *FileWriter) Flush() error { + if err := fw.memWriter.Flush(); err != nil { + return err + } + + return fw.fileWriter.Flush() +} + func (fw *FileWriter) Close() error { + if err := fw.memWriter.Close(); err != nil { + return err + } if err := fw.Flush(); err != nil { return err } @@ -365,6 +430,13 @@ func (fw *FileWriter) Close() error { return fw.f.Close() } +func (fw *FileWriter) Sync() error { + if err := fw.memWriter.Sync(); err != nil { + return err + } + return fw.f.Sync() +} + func (fw *FileWriter) Remove() error { return os.Remove(fw.name) } @@ -373,16 +445,16 @@ func (w *binaryWriter) AddIndexMeta(indexVersion int, indexPostingOffsetTable ui w.buf.Reset() w.buf.PutByte(byte(indexVersion)) w.buf.PutBE64(indexPostingOffsetTable) - return w.f.Write(w.buf.Get()) + return w.writer.Write(w.buf.Get()) } func (w *binaryWriter) SymbolsWriter() io.Writer { - w.toc.Symbols = w.f.Pos() + w.toc.Symbols = w.writer.Pos() return w } func (w *binaryWriter) PostingOffsetsWriter() io.Writer { - w.toc.PostingsOffsetTable = w.f.Pos() + w.toc.PostingsOffsetTable = w.writer.Pos() return w } @@ -394,17 +466,21 @@ func (w *binaryWriter) WriteTOC() error { w.buf.PutHash(w.crc32) - return w.f.Write(w.buf.Get()) + return w.writer.Write(w.buf.Get()) } func (w *binaryWriter) Write(p []byte) (int, error) { - n := w.f.Pos() - err := w.f.Write(p) - return int(w.f.Pos() - n), err + n := w.writer.Pos() + err := w.writer.Write(p) + return int(w.writer.Pos() - n), err +} + +func (w *binaryWriter) Buffer() []byte { + return w.writer.Buffer() } func (w 
*binaryWriter) Close() error { - return w.f.Close() + return w.writer.Close() } type postingValueOffsets struct { @@ -458,21 +534,45 @@ type BinaryReader struct { // NewBinaryReader loads or builds new index-header if not present on disk. func NewBinaryReader(ctx context.Context, logger log.Logger, bkt objstore.BucketReader, dir string, id ulid.ULID, postingOffsetsInMemSampling int) (*BinaryReader, error) { - binfn := filepath.Join(dir, id.String(), block.IndexHeaderFilename) - br, err := newFileBinaryReader(binfn, postingOffsetsInMemSampling) - if err == nil { - return br, nil + if dir != "" { + binfn := filepath.Join(dir, id.String(), block.IndexHeaderFilename) + br, err := newFileBinaryReader(binfn, postingOffsetsInMemSampling) + if err == nil { + return br, nil + } + + level.Debug(logger).Log("msg", "failed to read index-header from disk; recreating", "path", binfn, "err", err) + + start := time.Now() + if _, err := WriteBinary(ctx, bkt, id, binfn); err != nil { + return nil, errors.Wrap(err, "write index header") + } + + level.Debug(logger).Log("msg", "built index-header file", "path", binfn, "elapsed", time.Since(start)) + return newFileBinaryReader(binfn, postingOffsetsInMemSampling) + } else { + buf, err := WriteBinary(ctx, bkt, id, "") + if err != nil { + return nil, errors.Wrap(err, "generate index header") + } + + return newMemoryBinaryReader(buf, postingOffsetsInMemSampling) } +} - level.Debug(logger).Log("msg", "failed to read index-header from disk; recreating", "path", binfn, "err", err) +func newMemoryBinaryReader(buf []byte, postingOffsetsInMemSampling int) (bw *BinaryReader, err error) { + r := &BinaryReader{ + b: realByteSlice(buf), + c: nil, + postings: map[string]*postingValueOffsets{}, + postingOffsetsInMemSampling: postingOffsetsInMemSampling, + } - start := time.Now() - if err := WriteBinary(ctx, bkt, id, binfn); err != nil { - return nil, errors.Wrap(err, "write index header") + if err := r.init(); err != nil { + return nil, err } - level.Debug(logger).Log("msg", "built index-header file", "path", binfn, "elapsed", time.Since(start)) - return newFileBinaryReader(binfn, postingOffsetsInMemSampling) + return r, nil } func newFileBinaryReader(path string, postingOffsetsInMemSampling int) (bw *BinaryReader, err error) { @@ -493,12 +593,44 @@ func newFileBinaryReader(path string, postingOffsetsInMemSampling int) (bw *Bina postingOffsetsInMemSampling: postingOffsetsInMemSampling, } + if err := r.init(); err != nil { + return nil, err + } + + return r, nil +} + +// newBinaryTOCFromByteSlice return parsed TOC from given index header byte slice. +func newBinaryTOCFromByteSlice(bs index.ByteSlice) (*BinaryTOC, error) { + if bs.Len() < binaryTOCLen { + return nil, encoding.ErrInvalidSize + } + b := bs.Range(bs.Len()-binaryTOCLen, bs.Len()) + + expCRC := binary.BigEndian.Uint32(b[len(b)-4:]) + d := encoding.Decbuf{B: b[:len(b)-4]} + + if d.Crc32(castagnoliTable) != expCRC { + return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read index header TOC") + } + + if err := d.Err(); err != nil { + return nil, err + } + + return &BinaryTOC{ + Symbols: d.Be64(), + PostingsOffsetTable: d.Be64(), + }, nil +} + +func (r *BinaryReader) init() (err error) { // Verify header. 
if r.b.Len() < headerLen { - return nil, errors.Wrap(encoding.ErrInvalidSize, "index header's header") + return errors.Wrap(encoding.ErrInvalidSize, "index header's header") } if m := binary.BigEndian.Uint32(r.b.Range(0, 4)); m != MagicIndex { - return nil, errors.Errorf("invalid magic number %x", m) + return errors.Errorf("invalid magic number %x", m) } r.version = int(r.b.Range(4, 5)[0]) r.indexVersion = int(r.b.Range(5, 6)[0]) @@ -506,51 +638,48 @@ func newFileBinaryReader(path string, postingOffsetsInMemSampling int) (bw *Bina r.indexLastPostingEnd = int64(binary.BigEndian.Uint64(r.b.Range(6, headerLen))) if r.version != BinaryFormatV1 { - return nil, errors.Errorf("unknown index header file version %d", r.version) + return errors.Errorf("unknown index header file version %d", r.version) } r.toc, err = newBinaryTOCFromByteSlice(r.b) if err != nil { - return nil, errors.Wrap(err, "read index header TOC") + return errors.Wrap(err, "read index header TOC") } // TODO(bwplotka): Consider contributing to Prometheus to allow specifying custom number for symbolsFactor. r.symbols, err = index.NewSymbols(r.b, r.indexVersion, int(r.toc.Symbols)) if err != nil { - return nil, errors.Wrap(err, "read symbols") + return errors.Wrap(err, "read symbols") } - var lastKey []string + var lastName, lastValue []byte if r.indexVersion == index.FormatV1 { // Earlier V1 formats don't have a sorted postings offset table, so // load the whole offset table into memory. r.postingsV1 = map[string]map[string]index.Range{} var prevRng index.Range - if err := index.ReadOffsetTable(r.b, r.toc.PostingsOffsetTable, func(key []string, off uint64, _ int) error { - if len(key) != 2 { - return errors.Errorf("unexpected key length for posting table %d", len(key)) + if err := index.ReadPostingsOffsetTable(r.b, r.toc.PostingsOffsetTable, func(name, value []byte, postingsOffset uint64, _ int) error { + if lastName != nil { + prevRng.End = int64(postingsOffset - crc32.Size) + r.postingsV1[string(lastName)][string(lastValue)] = prevRng } - if lastKey != nil { - prevRng.End = int64(off - crc32.Size) - r.postingsV1[lastKey[0]][lastKey[1]] = prevRng + if _, ok := r.postingsV1[string(name)]; !ok { + r.postingsV1[string(name)] = map[string]index.Range{} + r.postings[string(name)] = nil // Used to get a list of labelnames in places. } - if _, ok := r.postingsV1[key[0]]; !ok { - r.postingsV1[key[0]] = map[string]index.Range{} - r.postings[key[0]] = nil // Used to get a list of labelnames in places. - } - - lastKey = key - prevRng = index.Range{Start: int64(off + postingLengthFieldSize)} + lastName = name + lastValue = value + prevRng = index.Range{Start: int64(postingsOffset + postingLengthFieldSize)} return nil }); err != nil { - return nil, errors.Wrap(err, "read postings table") + return errors.Wrap(err, "read postings table") } - if lastKey != nil { + if string(lastName) != "" { prevRng.End = r.indexLastPostingEnd - crc32.Size - r.postingsV1[lastKey[0]][lastKey[1]] = prevRng + r.postingsV1[string(lastName)][string(lastValue)] = prevRng } } else { lastTableOff := 0 @@ -558,46 +687,44 @@ func newFileBinaryReader(path string, postingOffsetsInMemSampling int) (bw *Bina // For the postings offset table we keep every label name but only every nth // label value (plus the first and last one), to save memory. 
- if err := index.ReadOffsetTable(r.b, r.toc.PostingsOffsetTable, func(key []string, off uint64, tableOff int) error { - if len(key) != 2 { - return errors.Errorf("unexpected key length for posting table %d", len(key)) - } - - if _, ok := r.postings[key[0]]; !ok { + if err := index.ReadPostingsOffsetTable(r.b, r.toc.PostingsOffsetTable, func(name, value []byte, postingsOffset uint64, labelOffset int) error { + if _, ok := r.postings[string(name)]; !ok { // Not seen before label name. - r.postings[key[0]] = &postingValueOffsets{} - if lastKey != nil { + r.postings[string(name)] = &postingValueOffsets{} + if lastName != nil { // Always include last value for each label name, unless it was just added in previous iteration based // on valueCount. - if (valueCount-1)%postingOffsetsInMemSampling != 0 { - r.postings[lastKey[0]].offsets = append(r.postings[lastKey[0]].offsets, postingOffset{value: lastKey[1], tableOff: lastTableOff}) + if (valueCount-1)%r.postingOffsetsInMemSampling != 0 { + r.postings[string(lastName)].offsets = append(r.postings[string(lastName)].offsets, postingOffset{value: string(lastValue), tableOff: lastTableOff}) } - r.postings[lastKey[0]].lastValOffset = int64(off - crc32.Size) - lastKey = nil + r.postings[string(lastName)].lastValOffset = int64(postingsOffset - crc32.Size) + lastName = nil + lastValue = nil } valueCount = 0 } - lastKey = key - lastTableOff = tableOff + lastName = name + lastValue = value + lastTableOff = labelOffset valueCount++ - if (valueCount-1)%postingOffsetsInMemSampling == 0 { - r.postings[key[0]].offsets = append(r.postings[key[0]].offsets, postingOffset{value: key[1], tableOff: tableOff}) + if (valueCount-1)%r.postingOffsetsInMemSampling == 0 { + r.postings[string(name)].offsets = append(r.postings[string(name)].offsets, postingOffset{value: string(value), tableOff: labelOffset}) } return nil }); err != nil { - return nil, errors.Wrap(err, "read postings table") + return errors.Wrap(err, "read postings table") } - if lastKey != nil { - if (valueCount-1)%postingOffsetsInMemSampling != 0 { + if lastName != nil { + if (valueCount-1)%r.postingOffsetsInMemSampling != 0 { // Always include last value for each label name if not included already based on valueCount. - r.postings[lastKey[0]].offsets = append(r.postings[lastKey[0]].offsets, postingOffset{value: lastKey[1], tableOff: lastTableOff}) + r.postings[string(lastName)].offsets = append(r.postings[string(lastName)].offsets, postingOffset{value: string(lastValue), tableOff: lastTableOff}) } // In any case lastValOffset is unknown as don't have next posting anymore. Guess from TOC table. // In worst case we will overfetch a few bytes. - r.postings[lastKey[0]].lastValOffset = r.indexLastPostingEnd - crc32.Size + r.postings[string(lastName)].lastValOffset = r.indexLastPostingEnd - crc32.Size } // Trim any extra space in the slices. for k, v := range r.postings { @@ -614,38 +741,14 @@ func newFileBinaryReader(path string, postingOffsetsInMemSampling int) (bw *Bina } off, err := r.symbols.ReverseLookup(k) if err != nil { - return nil, errors.Wrap(err, "reverse symbol lookup") + return errors.Wrap(err, "reverse symbol lookup") } r.nameSymbols[off] = k } r.dec = &index.Decoder{LookupSymbol: r.LookupSymbol} - return r, nil -} - -// newBinaryTOCFromByteSlice return parsed TOC from given index header byte slice. 
-func newBinaryTOCFromByteSlice(bs index.ByteSlice) (*BinaryTOC, error) { - if bs.Len() < binaryTOCLen { - return nil, encoding.ErrInvalidSize - } - b := bs.Range(bs.Len()-binaryTOCLen, bs.Len()) - - expCRC := binary.BigEndian.Uint32(b[len(b)-4:]) - d := encoding.Decbuf{B: b[:len(b)-4]} - - if d.Crc32(castagnoliTable) != expCRC { - return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read index header TOC") - } - - if err := d.Err(); err != nil { - return nil, err - } - - return &BinaryTOC{ - Symbols: d.Be64(), - PostingsOffsetTable: d.Be64(), - }, nil + return nil } func (r *BinaryReader) IndexVersion() (int, error) { @@ -915,7 +1018,12 @@ func (r *BinaryReader) LabelNames() ([]string, error) { return labelNames, nil } -func (r *BinaryReader) Close() error { return r.c.Close() } +func (r *BinaryReader) Close() error { + if r.c == nil { + return nil + } + return r.c.Close() +} type realByteSlice []byte diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go index 13bf4768123..c3bee382c2f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go @@ -71,7 +71,6 @@ type LazyBinaryReader struct { logger log.Logger bkt objstore.BucketReader dir string - filepath string id ulid.ULID postingOffsetsInMemSampling int metrics *LazyBinaryReaderMetrics @@ -99,22 +98,23 @@ func NewLazyBinaryReader( metrics *LazyBinaryReaderMetrics, onClosed func(*LazyBinaryReader), ) (*LazyBinaryReader, error) { - filepath := filepath.Join(dir, id.String(), block.IndexHeaderFilename) - - // If the index-header doesn't exist we should download it. - if _, err := os.Stat(filepath); err != nil { - if !os.IsNotExist(err) { - return nil, errors.Wrap(err, "read index header") + if dir != "" { + indexHeaderFile := filepath.Join(dir, id.String(), block.IndexHeaderFilename) + // If the index-header doesn't exist we should download it. 
+ if _, err := os.Stat(indexHeaderFile); err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrap(err, "read index header") + } + + level.Debug(logger).Log("msg", "the index-header doesn't exist on disk; recreating", "path", indexHeaderFile) + + start := time.Now() + if _, err := WriteBinary(ctx, bkt, id, indexHeaderFile); err != nil { + return nil, errors.Wrap(err, "write index header") + } + + level.Debug(logger).Log("msg", "built index-header file", "path", indexHeaderFile, "elapsed", time.Since(start)) } - - level.Debug(logger).Log("msg", "the index-header doesn't exist on disk; recreating", "path", filepath) - - start := time.Now() - if err := WriteBinary(ctx, bkt, id, filepath); err != nil { - return nil, errors.Wrap(err, "write index header") - } - - level.Debug(logger).Log("msg", "built index-header file", "path", filepath, "elapsed", time.Since(start)) } return &LazyBinaryReader{ @@ -122,7 +122,6 @@ func NewLazyBinaryReader( logger: logger, bkt: bkt, dir: dir, - filepath: filepath, id: id, postingOffsetsInMemSampling: postingOffsetsInMemSampling, metrics: metrics, @@ -241,7 +240,7 @@ func (r *LazyBinaryReader) load() (returnErr error) { return r.readerErr } - level.Debug(r.logger).Log("msg", "lazy loading index-header file", "path", r.filepath) + level.Debug(r.logger).Log("msg", "lazy loading index-header", "block", r.id) r.metrics.loadCount.Inc() startTime := time.Now() @@ -249,11 +248,11 @@ func (r *LazyBinaryReader) load() (returnErr error) { if err != nil { r.metrics.loadFailedCount.Inc() r.readerErr = err - return errors.Wrapf(err, "lazy load index-header file at %s", r.filepath) + return errors.Wrapf(err, "lazy load index-header for block %s", r.id) } r.reader = reader - level.Debug(r.logger).Log("msg", "lazy loaded index-header file", "path", r.filepath, "elapsed", time.Since(startTime)) + level.Debug(r.logger).Log("msg", "lazy loaded index-header", "block", r.id, "elapsed", time.Since(startTime)) r.metrics.loadDuration.Observe(time.Since(startTime).Seconds()) return nil diff --git a/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go b/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go index 0aa76651cc9..a84ea54767b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go @@ -323,7 +323,7 @@ type adjustableSeriesIterator interface { // adjustAtValue allows to adjust value by implementation if needed knowing the last value. This is used by counter // implementation which can adjust for obsolete counter value. - adjustAtValue(lastValue float64) + adjustAtValue(lastFloatValue float64) } type noopAdjustableSeriesIterator struct { @@ -360,11 +360,11 @@ type counterErrAdjustSeriesIterator struct { errAdjust float64 } -func (it *counterErrAdjustSeriesIterator) adjustAtValue(lastValue float64) { +func (it *counterErrAdjustSeriesIterator) adjustAtValue(lastFloatValue float64) { _, v := it.At() - if lastValue > v { + if lastFloatValue > v { // This replica has obsolete value (did not see the correct "end" of counter value before app restart). Adjust. - it.errAdjust += lastValue - v + it.errAdjust += lastFloatValue - v } } @@ -380,8 +380,8 @@ type dedupSeriesIterator struct { // TODO(bwplotka): Don't base on LastT, but on detected scrape interval. This will allow us to be more // responsive to gaps: https://github.com/thanos-io/thanos/issues/981, let's do it in next PR. 
- lastT int64 - lastV float64 + lastT int64 + lastIter chunkenc.Iterator penA, penB int64 useA bool @@ -389,23 +389,24 @@ type dedupSeriesIterator struct { func newDedupSeriesIterator(a, b adjustableSeriesIterator) *dedupSeriesIterator { return &dedupSeriesIterator{ - a: a, - b: b, - lastT: math.MinInt64, - lastV: float64(math.MinInt64), - aval: a.Next(), - bval: b.Next(), + a: a, + b: b, + lastT: math.MinInt64, + lastIter: a, + aval: a.Next(), + bval: b.Next(), } } func (it *dedupSeriesIterator) Next() chunkenc.ValueType { - lastValue := it.lastV + lastFloatVal, isFloatVal := it.lastFloatVal() lastUseA := it.useA defer func() { - if it.useA != lastUseA { + if it.useA != lastUseA && isFloatVal { // We switched replicas. // Ensure values are correct bases on value before At. - it.adjustAtValue(lastValue) + // TODO(rabenhorst): Investigate if we also need to implement adjusting histograms here. + it.adjustAtValue(lastFloatVal) } }() @@ -421,14 +422,16 @@ func (it *dedupSeriesIterator) Next() chunkenc.ValueType { if it.aval == chunkenc.ValNone { it.useA = false if it.bval != chunkenc.ValNone { - it.lastT, it.lastV = it.b.At() + it.lastT = it.b.AtT() + it.lastIter = it.b it.penB = 0 } return it.bval } if it.bval == chunkenc.ValNone { it.useA = true - it.lastT, it.lastV = it.a.At() + it.lastT = it.a.AtT() + it.lastIter = it.a it.penA = 0 return it.aval } @@ -436,8 +439,8 @@ func (it *dedupSeriesIterator) Next() chunkenc.ValueType { // with the smaller timestamp. // The applied penalty potentially already skipped potential samples already // that would have resulted in exaggerated sampling frequency. - ta, va := it.a.At() - tb, vb := it.b.At() + ta := it.a.AtT() + tb := it.b.AtT() it.useA = ta <= tb @@ -458,7 +461,8 @@ func (it *dedupSeriesIterator) Next() chunkenc.ValueType { } it.penA = 0 it.lastT = ta - it.lastV = va + it.lastIter = it.a + return it.aval } if it.lastT != math.MinInt64 { @@ -468,26 +472,40 @@ func (it *dedupSeriesIterator) Next() chunkenc.ValueType { } it.penB = 0 it.lastT = tb - it.lastV = vb + it.lastIter = it.b return it.bval } -func (it *dedupSeriesIterator) adjustAtValue(lastValue float64) { - if it.aval != chunkenc.ValNone { - it.a.adjustAtValue(lastValue) +func (it *dedupSeriesIterator) lastFloatVal() (float64, bool) { + if it.useA && it.aval == chunkenc.ValFloat { + _, v := it.lastIter.At() + return v, true } - if it.bval != chunkenc.ValNone { - it.b.adjustAtValue(lastValue) + if !it.useA && it.bval == chunkenc.ValFloat { + _, v := it.lastIter.At() + return v, true + } + return 0, false +} + +func (it *dedupSeriesIterator) adjustAtValue(lastFloatValue float64) { + if it.aval == chunkenc.ValFloat { + it.a.adjustAtValue(lastFloatValue) + } + if it.bval == chunkenc.ValFloat { + it.b.adjustAtValue(lastFloatValue) } } -// TODO(rabenhorst): Native histogram support needs to be implemented, float type hardcoded. func (it *dedupSeriesIterator) Seek(t int64) chunkenc.ValueType { // Don't use underlying Seek, but iterate over next to not miss gaps. for { - ts, _ := it.At() + ts := it.AtT() if ts >= t { - return chunkenc.ValFloat + if it.useA { + return it.a.Seek(ts) + } + return it.b.Seek(ts) } if it.Next() == chunkenc.ValNone { return chunkenc.ValNone @@ -496,27 +514,23 @@ func (it *dedupSeriesIterator) Seek(t int64) chunkenc.ValueType { } func (it *dedupSeriesIterator) At() (int64, float64) { - if it.useA { - return it.a.At() - } - return it.b.At() + return it.lastIter.At() } -// TODO(rabenhorst): Needs to be implemented for native histogram support. 
func (it *dedupSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { - panic("not implemented") + return it.lastIter.AtHistogram() } func (it *dedupSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { - panic("not implemented") + return it.lastIter.AtFloatHistogram() } func (it *dedupSeriesIterator) AtT() int64 { var t int64 if it.useA { - t, _ = it.a.At() + t = it.a.AtT() } else { - t, _ = it.b.At() + t = it.b.AtT() } return t } @@ -553,18 +567,16 @@ func (it *boundedSeriesIterator) At() (t int64, v float64) { return it.it.At() } -// TODO(rabenhorst): Needs to be implemented for native histogram support. func (it *boundedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { - panic("not implemented") + return it.it.AtHistogram() } func (it *boundedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { - panic("not implemented") + return it.it.AtFloatHistogram() } func (it *boundedSeriesIterator) AtT() int64 { - t, _ := it.it.At() - return t + return it.it.AtT() } func (it *boundedSeriesIterator) Next() chunkenc.ValueType { @@ -572,14 +584,14 @@ func (it *boundedSeriesIterator) Next() chunkenc.ValueType { if valueType == chunkenc.ValNone { return chunkenc.ValNone } - t, _ := it.it.At() + t := it.it.AtT() // Advance the iterator if we are before the valid interval. if t < it.mint { if it.Seek(it.mint) == chunkenc.ValNone { return chunkenc.ValNone } - t, _ = it.it.At() + t = it.it.AtT() } // Once we passed the valid interval, there is no going back. if t <= it.maxt { diff --git a/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go b/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go index 00c970b9296..76f8958e79f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go @@ -122,14 +122,13 @@ func (it *pushdownSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHis } func (it *pushdownSeriesIterator) AtT() int64 { - t, _ := it.a.At() + t := it.a.AtT() return t } -// TODO(rabenhorst): Native histogram support needs to be implemented, currently float type is hardcoded. 
func (it *pushdownSeriesIterator) Seek(t int64) chunkenc.ValueType { for { - ts, _ := it.At() + ts := it.AtT() if ts >= t { return chunkenc.ValFloat } diff --git a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go index 36a16a3112a..3df0fc69898 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go +++ b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go @@ -89,13 +89,20 @@ func (k *Keeper) NewGate(maxConcurrent int) Gate { func New(reg prometheus.Registerer, maxConcurrent int) Gate { promauto.With(reg).NewGauge(MaxGaugeOpts).Set(float64(maxConcurrent)) + var gate Gate + if maxConcurrent <= 0 { + gate = NewNoop() + } else { + gate = promgate.New(maxConcurrent) + } + return InstrumentGateDuration( promauto.With(reg).NewHistogram(DurationHistogramOpts), InstrumentGateTotal( promauto.With(reg).NewCounter(TotalCounterOpts), InstrumentGateInFlight( promauto.With(reg).NewGauge(InFlightGaugeOpts), - promgate.New(maxConcurrent), + gate, ), ), ) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index b1fbd898a25..23517b8d91c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -495,6 +495,10 @@ func NewBucketStore( return nil, errors.Wrap(err, "validate config") } + if dir == "" { + return s, nil + } + if err := os.MkdirAll(dir, 0750); err != nil { return nil, errors.Wrap(err, "create dir") } @@ -591,6 +595,10 @@ func (s *BucketStore) InitialSync(ctx context.Context) error { return errors.Wrap(err, "sync block") } + if s.dir == "" { + return nil + } + fis, err := os.ReadDir(s.dir) if err != nil { return errors.Wrap(err, "read dir") @@ -624,15 +632,20 @@ func (s *BucketStore) getBlock(id ulid.ULID) *bucketBlock { } func (s *BucketStore) addBlock(ctx context.Context, meta *metadata.Meta) (err error) { - dir := filepath.Join(s.dir, meta.ULID.String()) + var dir string + if s.dir != "" { + dir = filepath.Join(s.dir, meta.ULID.String()) + } start := time.Now() level.Debug(s.logger).Log("msg", "loading new block", "id", meta.ULID) defer func() { if err != nil { s.metrics.blockLoadFailures.Inc() - if err2 := os.RemoveAll(dir); err2 != nil { - level.Warn(s.logger).Log("msg", "failed to remove block we cannot load", "err", err2) + if dir != "" { + if err2 := os.RemoveAll(dir); err2 != nil { + level.Warn(s.logger).Log("msg", "failed to remove block we cannot load", "err", err2) + } } level.Warn(s.logger).Log("msg", "loading block failed", "elapsed", time.Since(start), "id", meta.ULID, "err", err) } else { @@ -721,6 +734,11 @@ func (s *BucketStore) removeBlock(id ulid.ULID) error { if err := b.Close(); err != nil { return errors.Wrap(err, "close block") } + + if b.dir == "" { + return nil + } + return os.RemoveAll(b.dir) } @@ -1208,6 +1226,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } shardMatcher := req.ShardInfo.Matcher(&s.buffers) + blockClient := newBlockSeriesClient( srv.Context(), s.logger, @@ -1220,9 +1239,11 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie s.seriesBatchSize, s.metrics.chunkFetchDuration, ) + defer blockClient.Close() g.Go(func() error { + span, _ := tracing.StartSpan(gctx, "bucket_store_block_series", tracing.Tags{ "block.id": blk.meta.ULID, "block.mint": blk.meta.MinTime, @@ -1464,6 +1485,7 @@ func (s *BucketStore) LabelNames(ctx context.Context, req 
*storepb.LabelNamesReq SkipChunks: true, } blockClient := newBlockSeriesClient(newCtx, s.logger, b, seriesReq, nil, bytesLimiter, nil, true, SeriesBatchSize, s.metrics.chunkFetchDuration) + defer blockClient.Close() if err := blockClient.ExpandPostings( reqSeriesMatchersNoExtLabels, @@ -1638,6 +1660,7 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR SkipChunks: true, } blockClient := newBlockSeriesClient(newCtx, s.logger, b, seriesReq, nil, bytesLimiter, nil, true, SeriesBatchSize, s.metrics.chunkFetchDuration) + defer blockClient.Close() if err := blockClient.ExpandPostings( reqSeriesMatchersNoExtLabels, diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go index d9245035f45..d969ddb64f6 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go @@ -91,9 +91,9 @@ func newProxyStoreMetrics(reg prometheus.Registerer) *proxyStoreMetrics { return &m } -func RegisterStoreServer(storeSrv storepb.StoreServer) func(*grpc.Server) { +func RegisterStoreServer(storeSrv storepb.StoreServer, logger log.Logger) func(*grpc.Server) { return func(s *grpc.Server) { - storepb.RegisterStoreServer(s, storeSrv) + storepb.RegisterStoreServer(s, NewRecoverableStoreServer(logger, storeSrv)) } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/recover.go b/vendor/github.com/thanos-io/thanos/pkg/store/recover.go new file mode 100644 index 00000000000..c453c3bec3c --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/recover.go @@ -0,0 +1,52 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package store + +import ( + "fmt" + "runtime" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + + "github.com/thanos-io/thanos/pkg/store/storepb" +) + +type recoverableStoreServer struct { + logger log.Logger + storepb.StoreServer +} + +func NewRecoverableStoreServer(logger log.Logger, storeServer storepb.StoreServer) *recoverableStoreServer { + return &recoverableStoreServer{logger: logger, StoreServer: storeServer} +} + +func (r *recoverableStoreServer) Series(request *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { + defer r.recover(srv) + return r.StoreServer.Series(request, srv) +} + +func (r *recoverableStoreServer) recover(srv storepb.Store_SeriesServer) { + e := recover() + if e == nil { + return + } + + switch err := e.(type) { + case runtime.Error: + // Print the stack trace but do not inhibit the running application. 
+ buf := make([]byte, 64<<10) + buf = buf[:runtime.Stack(buf, false)] + + level.Error(r.logger).Log("msg", "runtime panic in TSDB Series server", "err", err.Error(), "stacktrace", string(buf)) + if err := srv.Send(storepb.NewWarnSeriesResponse(err)); err != nil { + level.Error(r.logger).Log("err", err) + } + default: + if err := srv.Send(storepb.NewWarnSeriesResponse(errors.New(fmt.Sprintf("unknown error while processing Series: %v", e)))); err != nil { + level.Error(r.logger).Log("err", err) + } + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go index c1f4b9b8bfe..85f858562bb 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go @@ -13,10 +13,12 @@ import ( "github.com/gogo/protobuf/types" "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "google.golang.org/grpc/codes" "github.com/thanos-io/thanos/pkg/store/labelpb" + prompb "github.com/thanos-io/thanos/pkg/store/storepb/prompb" ) var PartialResponseStrategyValues = func() []string { @@ -552,3 +554,29 @@ func (m *QueryHints) IsSafeToExecute() bool { return false } + +// HistogramProtoToHistogram extracts a (normal integer) Histogram from the +// provided proto message. The caller has to make sure that the proto message +// represents an interger histogram and not a float histogram. +func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { + return &histogram.Histogram{ + Schema: hp.Schema, + ZeroThreshold: hp.ZeroThreshold, + ZeroCount: hp.GetZeroCountInt(), + Count: hp.GetCountInt(), + Sum: hp.Sum, + PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), + PositiveBuckets: hp.GetPositiveDeltas(), + NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), + NegativeBuckets: hp.GetNegativeDeltas(), + } +} + +func spansProtoToSpans(s []*prompb.BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go index aeb4a25aef2..7d2b8c7f619 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go @@ -55,6 +55,14 @@ type inProcessStream struct { err chan error } +func NewInProcessStream(ctx context.Context, bufferSize int) *inProcessStream { + return &inProcessStream{ + ctx: ctx, + recv: make(chan *SeriesResponse, bufferSize), + err: make(chan error), + } +} + func (s *inProcessStream) Context() context.Context { return s.ctx } func (s *inProcessStream) Send(r *SeriesResponse) error { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go index d8ef292dcfa..f5332f345ba 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go @@ -70,6 +70,37 @@ func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_166e07899dab7c14, []int{0, 0} } +type Histogram_ResetHint int32 + +const ( 
+ Histogram_UNKNOWN Histogram_ResetHint = 0 + Histogram_YES Histogram_ResetHint = 1 + Histogram_NO Histogram_ResetHint = 2 + Histogram_GAUGE Histogram_ResetHint = 3 +) + +var Histogram_ResetHint_name = map[int32]string{ + 0: "UNKNOWN", + 1: "YES", + 2: "NO", + 3: "GAUGE", +} + +var Histogram_ResetHint_value = map[string]int32{ + "UNKNOWN": 0, + "YES": 1, + "NO": 2, + "GAUGE": 3, +} + +func (x Histogram_ResetHint) String() string { + return proto.EnumName(Histogram_ResetHint_name, int32(x)) +} + +func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_166e07899dab7c14, []int{3, 0} +} + type LabelMatcher_Type int32 const ( @@ -98,25 +129,28 @@ func (x LabelMatcher_Type) String() string { } func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{4, 0} + return fileDescriptor_166e07899dab7c14, []int{6, 0} } // We require this to match chunkenc.Encoding. type Chunk_Encoding int32 const ( - Chunk_UNKNOWN Chunk_Encoding = 0 - Chunk_XOR Chunk_Encoding = 1 + Chunk_UNKNOWN Chunk_Encoding = 0 + Chunk_XOR Chunk_Encoding = 1 + Chunk_HISTOGRAM Chunk_Encoding = 2 ) var Chunk_Encoding_name = map[int32]string{ 0: "UNKNOWN", 1: "XOR", + 2: "HISTOGRAM", } var Chunk_Encoding_value = map[string]int32{ - "UNKNOWN": 0, - "XOR": 1, + "UNKNOWN": 0, + "XOR": 1, + "HISTOGRAM": 2, } func (x Chunk_Encoding) String() string { @@ -124,7 +158,7 @@ func (x Chunk_Encoding) String() string { } func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{6, 0} + return fileDescriptor_166e07899dab7c14, []int{8, 0} } type MetricMetadata struct { @@ -305,20 +339,315 @@ func (m *Exemplar) GetTimestamp() int64 { return 0 } +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +type Histogram struct { + // Types that are valid to be assigned to Count: + // + // *Histogram_CountInt + // *Histogram_CountFloat + Count isHistogram_Count `protobuf_oneof:"count"` + Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + // Types that are valid to be assigned to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets. + NegativeSpans []*BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans,omitempty"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. 
+ NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` + NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` + // Positive Buckets. + PositiveSpans []*BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans,omitempty"` + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` + PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` + ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus_copy.Histogram_ResetHint" json:"reset_hint,omitempty"` + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_166e07899dab7c14, []int{3} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +type isHistogram_Count interface { + isHistogram_Count() + MarshalTo([]byte) (int, error) + Size() int +} +type isHistogram_ZeroCount interface { + isHistogram_ZeroCount() + MarshalTo([]byte) (int, error) + Size() int +} + +type Histogram_CountInt struct { + CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"` +} +type Histogram_CountFloat struct { + CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"` +} +type Histogram_ZeroCountInt struct { + ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"` +} +type Histogram_ZeroCountFloat struct { + ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"` +} + +func (*Histogram_CountInt) isHistogram_Count() {} +func (*Histogram_CountFloat) isHistogram_Count() {} +func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} +func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} + +func (m *Histogram) GetCount() isHistogram_Count { + if m != nil { + return m.Count + } + return nil +} +func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { + if m != nil { + return m.ZeroCount + } + 
return nil +} + +func (m *Histogram) GetCountInt() uint64 { + if x, ok := m.GetCount().(*Histogram_CountInt); ok { + return x.CountInt + } + return 0 +} + +func (m *Histogram) GetCountFloat() float64 { + if x, ok := m.GetCount().(*Histogram_CountFloat); ok { + return x.CountFloat + } + return 0 +} + +func (m *Histogram) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCountInt() uint64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok { + return x.ZeroCountInt + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok { + return x.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpans() []*BucketSpan { + if m != nil { + return m.NegativeSpans + } + return nil +} + +func (m *Histogram) GetNegativeDeltas() []int64 { + if m != nil { + return m.NegativeDeltas + } + return nil +} + +func (m *Histogram) GetNegativeCounts() []float64 { + if m != nil { + return m.NegativeCounts + } + return nil +} + +func (m *Histogram) GetPositiveSpans() []*BucketSpan { + if m != nil { + return m.PositiveSpans + } + return nil +} + +func (m *Histogram) GetPositiveDeltas() []int64 { + if m != nil { + return m.PositiveDeltas + } + return nil +} + +func (m *Histogram) GetPositiveCounts() []float64 { + if m != nil { + return m.PositiveCounts + } + return nil +} + +func (m *Histogram) GetResetHint() Histogram_ResetHint { + if m != nil { + return m.ResetHint + } + return Histogram_UNKNOWN +} + +func (m *Histogram) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Histogram) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Histogram_CountInt)(nil), + (*Histogram_CountFloat)(nil), + (*Histogram_ZeroCountInt)(nil), + (*Histogram_ZeroCountFloat)(nil), + } +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). 
+type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_166e07899dab7c14, []int{4} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + // TimeSeries represents samples and labels for a single time series. type TimeSeries struct { // Labels have to be sorted by label names and without duplicated label names. // TODO(bwplotka): Don't use zero copy ZLabels, see https://github.com/thanos-io/thanos/pull/3279 for details. - Labels []github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel" json:"labels"` - Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` - Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` + Labels []github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` + Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"` } func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (m *TimeSeries) String() string { return proto.CompactTextString(m) } func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{3} + return fileDescriptor_166e07899dab7c14, []int{5} } func (m *TimeSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -361,6 +690,13 @@ func (m *TimeSeries) GetExemplars() []Exemplar { return nil } +func (m *TimeSeries) GetHistograms() []Histogram { + if m != nil { + return m.Histograms + } + return nil +} + // Matcher specifies a rule, which can match or set of labels or not. 
type LabelMatcher struct { Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus_copy.LabelMatcher_Type" json:"type,omitempty"` @@ -372,7 +708,7 @@ func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{4} + return fileDescriptor_166e07899dab7c14, []int{6} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -436,7 +772,7 @@ func (m *ReadHints) Reset() { *m = ReadHints{} } func (m *ReadHints) String() string { return proto.CompactTextString(m) } func (*ReadHints) ProtoMessage() {} func (*ReadHints) Descriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{5} + return fileDescriptor_166e07899dab7c14, []int{7} } func (m *ReadHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +863,7 @@ func (m *Chunk) Reset() { *m = Chunk{} } func (m *Chunk) String() string { return proto.CompactTextString(m) } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{6} + return fileDescriptor_166e07899dab7c14, []int{8} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -596,7 +932,7 @@ func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} } func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } func (*ChunkedSeries) ProtoMessage() {} func (*ChunkedSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{7} + return fileDescriptor_166e07899dab7c14, []int{9} } func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,11 +970,14 @@ func (m *ChunkedSeries) GetChunks() []Chunk { func init() { proto.RegisterEnum("prometheus_copy.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value) + proto.RegisterEnum("prometheus_copy.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) proto.RegisterEnum("prometheus_copy.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) proto.RegisterEnum("prometheus_copy.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) proto.RegisterType((*MetricMetadata)(nil), "prometheus_copy.MetricMetadata") proto.RegisterType((*Sample)(nil), "prometheus_copy.Sample") proto.RegisterType((*Exemplar)(nil), "prometheus_copy.Exemplar") + proto.RegisterType((*Histogram)(nil), "prometheus_copy.Histogram") + proto.RegisterType((*BucketSpan)(nil), "prometheus_copy.BucketSpan") proto.RegisterType((*TimeSeries)(nil), "prometheus_copy.TimeSeries") proto.RegisterType((*LabelMatcher)(nil), "prometheus_copy.LabelMatcher") proto.RegisterType((*ReadHints)(nil), "prometheus_copy.ReadHints") @@ -649,57 +988,79 @@ func init() { func init() { proto.RegisterFile("store/storepb/prompb/types.proto", fileDescriptor_166e07899dab7c14) } var fileDescriptor_166e07899dab7c14 = []byte{ - // 796 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4f, 0x8f, 0xdb, 0x44, - 0x14, 0xcf, 0xd8, 0x8e, 0x1d, 0xbf, 0xfd, 0x83, 0x35, 0x2a, 0xd4, 0xbb, 0x42, 0x59, 0xcb, 0xa7, - 0x08, 0x81, 0x23, 0xb5, 0x15, 0x5c, 0x0a, 0xd2, 0x6e, 0xe5, 0x6e, 0x2b, 0x70, 0xa2, 0x4e, 0xb2, - 0x02, 0x7a, 0x89, 0x26, 0xce, 0xd4, 0xb1, 0x1a, 0xff, 0x91, 0x67, 0x82, 0x36, 0xdf, 0x82, 0x33, - 0x37, 0xc4, 0x8d, 0x1b, 0x7c, 0x8a, 0x1e, 0x7b, 0x44, 
0x1c, 0x2a, 0xb4, 0x7b, 0xe2, 0x5b, 0xa0, - 0x19, 0x3b, 0xf5, 0xa6, 0x4b, 0xaf, 0xbd, 0xac, 0xde, 0xfb, 0xfd, 0xde, 0x3f, 0xbf, 0xf7, 0xdb, - 0x09, 0x78, 0x5c, 0x14, 0x15, 0x1b, 0xaa, 0xbf, 0xe5, 0x7c, 0x58, 0x56, 0x45, 0x56, 0xce, 0x87, - 0x62, 0x53, 0x32, 0x1e, 0x94, 0x55, 0x21, 0x0a, 0xfc, 0x91, 0xc4, 0x98, 0x58, 0xb2, 0x35, 0x9f, - 0xc5, 0x45, 0xb9, 0x39, 0xbe, 0x93, 0x14, 0x49, 0xa1, 0xb8, 0xa1, 0xb4, 0xea, 0xb0, 0xe3, 0xa3, - 0xba, 0xd0, 0x8a, 0xce, 0xd9, 0x6a, 0xb7, 0x82, 0xff, 0xab, 0x06, 0x87, 0x11, 0x13, 0x55, 0x1a, - 0x47, 0x4c, 0xd0, 0x05, 0x15, 0x14, 0x7f, 0x03, 0x86, 0x8c, 0x70, 0x91, 0x87, 0x06, 0x87, 0xf7, - 0x3e, 0x0b, 0xde, 0xe9, 0x11, 0xec, 0x86, 0x37, 0xee, 0x74, 0x53, 0x32, 0xa2, 0xf2, 0xf0, 0xe7, - 0x80, 0x33, 0x85, 0xcd, 0x5e, 0xd0, 0x2c, 0x5d, 0x6d, 0x66, 0x39, 0xcd, 0x98, 0xab, 0x79, 0x68, - 0x60, 0x13, 0xa7, 0x66, 0x1e, 0x2b, 0x62, 0x44, 0x33, 0x86, 0x31, 0x18, 0x4b, 0xb6, 0x2a, 0x5d, - 0x43, 0xf1, 0xca, 0x96, 0xd8, 0x3a, 0x4f, 0x85, 0xdb, 0xad, 0x31, 0x69, 0xfb, 0x1b, 0x80, 0xb6, - 0x13, 0xde, 0x03, 0xeb, 0x62, 0xf4, 0xed, 0x68, 0xfc, 0xfd, 0xc8, 0xe9, 0x48, 0xe7, 0xd1, 0xf8, - 0x62, 0x34, 0x0d, 0x89, 0x83, 0xb0, 0x0d, 0xdd, 0xf3, 0xd3, 0x8b, 0xf3, 0xd0, 0xd1, 0xf0, 0x01, - 0xd8, 0x4f, 0x9e, 0x4e, 0xa6, 0xe3, 0x73, 0x72, 0x1a, 0x39, 0x3a, 0xc6, 0x70, 0xa8, 0x98, 0x16, - 0x33, 0x64, 0xea, 0xe4, 0x22, 0x8a, 0x4e, 0xc9, 0x8f, 0x4e, 0x17, 0xf7, 0xc0, 0x78, 0x3a, 0x7a, - 0x3c, 0x76, 0x4c, 0xbc, 0x0f, 0xbd, 0xc9, 0xf4, 0x74, 0x1a, 0x4e, 0xc2, 0xa9, 0x63, 0xf9, 0x0f, - 0xc1, 0x9c, 0xd0, 0xac, 0x5c, 0x31, 0x7c, 0x07, 0xba, 0x3f, 0xd1, 0xd5, 0xba, 0xde, 0x0d, 0x22, - 0xb5, 0x83, 0x3f, 0x05, 0x5b, 0xa4, 0x19, 0xe3, 0x82, 0x66, 0xa5, 0xfa, 0x4e, 0x9d, 0xb4, 0x80, - 0xff, 0x1b, 0x82, 0x5e, 0x78, 0xc9, 0xb2, 0x72, 0x45, 0x2b, 0x1c, 0x83, 0xa9, 0xae, 0xc0, 0x5d, - 0xe4, 0xe9, 0x83, 0xbd, 0x7b, 0x07, 0x81, 0x58, 0xd2, 0xbc, 0xe0, 0xc1, 0x77, 0x12, 0x3d, 0x7b, - 0xf8, 0xea, 0xcd, 0x49, 0xe7, 0xef, 0x37, 0x27, 0x0f, 0x92, 0x54, 0x2c, 0xd7, 0xf3, 0x20, 0x2e, - 0xb2, 0x61, 0x1d, 0xf0, 0x45, 0x5a, 0x34, 0xd6, 0xb0, 0x7c, 0x99, 0x0c, 0x77, 0x0e, 0x1a, 0x3c, - 0x57, 0xd9, 0xa4, 0x29, 0xdd, 0x4e, 0xa9, 0xbd, 0x77, 0x4a, 0xfd, 0xdd, 0x29, 0xff, 0x45, 0x00, - 0xd3, 0x34, 0x63, 0x13, 0x56, 0xa5, 0x8c, 0x7f, 0x98, 0x39, 0xbf, 0x02, 0x8b, 0xab, 0xbd, 0x72, - 0x57, 0x53, 0x5d, 0xee, 0xde, 0xd2, 0x5a, 0xbd, 0xf7, 0x33, 0x43, 0xf6, 0x23, 0xdb, 0x68, 0xfc, - 0x35, 0xd8, 0xac, 0xd9, 0x28, 0x77, 0x75, 0x95, 0x7a, 0x74, 0x2b, 0x75, 0xbb, 0xf3, 0x26, 0xb9, - 0xcd, 0xf0, 0x7f, 0x41, 0xb0, 0xaf, 0x26, 0x89, 0xa8, 0x88, 0x97, 0xac, 0xc2, 0x5f, 0xee, 0x28, - 0xde, 0xbf, 0x55, 0xea, 0x66, 0x70, 0x70, 0x43, 0xe9, 0x18, 0x8c, 0x1b, 0xda, 0x56, 0x76, 0xbb, - 0x7c, 0x5d, 0x81, 0xb5, 0xe3, 0x0f, 0xc0, 0x50, 0xba, 0x35, 0x41, 0x0b, 0x9f, 0x39, 0x1d, 0x6c, - 0x81, 0x3e, 0x0a, 0x9f, 0x39, 0x48, 0x02, 0x44, 0x6a, 0x55, 0x02, 0x24, 0x74, 0x74, 0xff, 0x0f, - 0x04, 0x36, 0x61, 0x74, 0xf1, 0x24, 0xcd, 0x05, 0xc7, 0x77, 0xc1, 0xe2, 0x82, 0x95, 0xb3, 0x8c, - 0xab, 0xe1, 0x74, 0x62, 0x4a, 0x37, 0xe2, 0xb2, 0xf5, 0x8b, 0x75, 0x1e, 0x6f, 0x5b, 0x4b, 0x1b, - 0x1f, 0x41, 0x8f, 0x0b, 0x5a, 0x09, 0x19, 0x5d, 0x1f, 0xd8, 0x52, 0x7e, 0xc4, 0xf1, 0xc7, 0x60, - 0xb2, 0x7c, 0x21, 0x09, 0x43, 0x11, 0x5d, 0x96, 0x2f, 0x22, 0x8e, 0x8f, 0xa1, 0x97, 0x54, 0xc5, - 0xba, 0x4c, 0xf3, 0xc4, 0xed, 0x7a, 0xfa, 0xc0, 0x26, 0x6f, 0x7d, 0x7c, 0x08, 0xda, 0x7c, 0xe3, - 0x9a, 0x1e, 0x1a, 0xf4, 0x88, 0x36, 0xdf, 0xc8, 0xea, 0x15, 0xcd, 0x13, 0x26, 0x8b, 0x58, 0x75, - 0x75, 0xe5, 0x47, 0xdc, 0xff, 0x13, 0x41, 0xf7, 0xd1, 0x72, 0x9d, 0xbf, 0xc4, 
0x7d, 0xd8, 0xcb, - 0xd2, 0x7c, 0x26, 0x75, 0xd5, 0xce, 0x6c, 0x67, 0x69, 0x2e, 0xb5, 0x15, 0x71, 0xc5, 0xd3, 0xcb, - 0xb7, 0x7c, 0xf3, 0xcf, 0x92, 0xd1, 0xcb, 0x86, 0xbf, 0xdf, 0x5c, 0x42, 0x57, 0x97, 0x38, 0xb9, - 0x75, 0x09, 0xd5, 0x25, 0x08, 0xf3, 0xb8, 0x58, 0xa4, 0x79, 0xd2, 0x9e, 0x41, 0xbe, 0x44, 0xea, - 0xd3, 0xf6, 0x89, 0xb2, 0x7d, 0x0f, 0x7a, 0xdb, 0xa8, 0xdd, 0xc7, 0xc2, 0x02, 0xfd, 0x87, 0x31, - 0x71, 0x90, 0xff, 0x3b, 0x82, 0x03, 0x55, 0x8e, 0x2d, 0x3e, 0xa4, 0xe8, 0x1f, 0x80, 0x19, 0xcb, - 0xae, 0x5b, 0xcd, 0x7f, 0xf2, 0xff, 0xdf, 0xd8, 0xa8, 0xb6, 0x89, 0x3d, 0xf3, 0x5e, 0x5d, 0xf5, - 0xd1, 0xeb, 0xab, 0x3e, 0xfa, 0xe7, 0xaa, 0x8f, 0x7e, 0xbe, 0xee, 0x77, 0x5e, 0x5f, 0xf7, 0x3b, - 0x7f, 0x5d, 0xf7, 0x3b, 0xcf, 0xcd, 0xfa, 0x67, 0x61, 0x6e, 0xaa, 0xf7, 0xfc, 0xfe, 0x7f, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x98, 0xc8, 0x2e, 0xfd, 0x35, 0x06, 0x00, 0x00, + // 1139 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x8f, 0x1a, 0xc7, + 0x13, 0x67, 0x66, 0x60, 0x60, 0x6a, 0x01, 0x8f, 0x5b, 0xfe, 0xdb, 0xe3, 0xfd, 0x27, 0x2c, 0x19, + 0xe5, 0x81, 0xac, 0x04, 0x24, 0x7b, 0x95, 0x5c, 0x36, 0x51, 0x96, 0x0d, 0xfb, 0x50, 0x02, 0xc8, + 0x0d, 0xab, 0xc4, 0xbe, 0xa0, 0x06, 0x7a, 0x99, 0xd1, 0x32, 0x0f, 0x4d, 0x37, 0xd6, 0x92, 0x4f, + 0x91, 0x73, 0x6e, 0x51, 0x6e, 0xc9, 0x29, 0x1f, 0x21, 0x37, 0x9f, 0x22, 0x1f, 0xa3, 0x1c, 0xac, + 0x68, 0xf7, 0x8b, 0x44, 0xdd, 0x33, 0xc3, 0xc0, 0x12, 0x4b, 0x39, 0xf9, 0x82, 0xaa, 0x7e, 0xf5, + 0xfa, 0x4d, 0x57, 0x75, 0x35, 0x50, 0x67, 0x3c, 0x88, 0x68, 0x4b, 0xfe, 0x86, 0xe3, 0x56, 0x18, + 0x05, 0x5e, 0x38, 0x6e, 0xf1, 0x65, 0x48, 0x59, 0x33, 0x8c, 0x02, 0x1e, 0xa0, 0x3b, 0x02, 0xa3, + 0xdc, 0xa1, 0x0b, 0x36, 0x9a, 0x04, 0xe1, 0x72, 0xf7, 0xde, 0x2c, 0x98, 0x05, 0xd2, 0xd6, 0x12, + 0x52, 0xec, 0xb6, 0xfb, 0x30, 0x4e, 0x34, 0x27, 0x63, 0x3a, 0xdf, 0xcc, 0x60, 0xff, 0xa4, 0x42, + 0xb5, 0x4b, 0x79, 0xe4, 0x4e, 0xba, 0x94, 0x93, 0x29, 0xe1, 0x04, 0x7d, 0x01, 0x79, 0xe1, 0x61, + 0x29, 0x75, 0xa5, 0x51, 0x7d, 0xfc, 0xa8, 0x79, 0xab, 0x46, 0x73, 0xd3, 0x3d, 0x51, 0x87, 0xcb, + 0x90, 0x62, 0x19, 0x87, 0x3e, 0x06, 0xe4, 0x49, 0x6c, 0x74, 0x41, 0x3c, 0x77, 0xbe, 0x1c, 0xf9, + 0xc4, 0xa3, 0x96, 0x5a, 0x57, 0x1a, 0x06, 0x36, 0x63, 0xcb, 0xb1, 0x34, 0xf4, 0x88, 0x47, 0x11, + 0x82, 0xbc, 0x43, 0xe7, 0xa1, 0x95, 0x97, 0x76, 0x29, 0x0b, 0x6c, 0xe1, 0xbb, 0xdc, 0x2a, 0xc4, + 0x98, 0x90, 0xed, 0x25, 0x40, 0x56, 0x09, 0xed, 0x40, 0xf1, 0xbc, 0xf7, 0x75, 0xaf, 0xff, 0x6d, + 0xcf, 0xcc, 0x09, 0xe5, 0xa8, 0x7f, 0xde, 0x1b, 0x76, 0xb0, 0xa9, 0x20, 0x03, 0x0a, 0x27, 0x87, + 0xe7, 0x27, 0x1d, 0x53, 0x45, 0x15, 0x30, 0x4e, 0xcf, 0x06, 0xc3, 0xfe, 0x09, 0x3e, 0xec, 0x9a, + 0x1a, 0x42, 0x50, 0x95, 0x96, 0x0c, 0xcb, 0x8b, 0xd0, 0xc1, 0x79, 0xb7, 0x7b, 0x88, 0x9f, 0x99, + 0x05, 0x54, 0x82, 0xfc, 0x59, 0xef, 0xb8, 0x6f, 0xea, 0xa8, 0x0c, 0xa5, 0xc1, 0xf0, 0x70, 0xd8, + 0x19, 0x74, 0x86, 0x66, 0xd1, 0x3e, 0x00, 0x7d, 0x40, 0xbc, 0x70, 0x4e, 0xd1, 0x3d, 0x28, 0xbc, + 0x20, 0xf3, 0x45, 0x7c, 0x36, 0x0a, 0x8e, 0x15, 0xf4, 0x0e, 0x18, 0xdc, 0xf5, 0x28, 0xe3, 0xc4, + 0x0b, 0xe5, 0x77, 0x6a, 0x38, 0x03, 0xec, 0x9f, 0x15, 0x28, 0x75, 0xae, 0xa8, 0x17, 0xce, 0x49, + 0x84, 0x26, 0xa0, 0xcb, 0x2e, 0x30, 0x4b, 0xa9, 0x6b, 0x8d, 0x9d, 0xc7, 0x95, 0x26, 0x77, 0x88, + 0x1f, 0xb0, 0xe6, 0x37, 0x02, 0x6d, 0x1f, 0xbc, 0x7c, 0xbd, 0x97, 0xfb, 0xeb, 0xf5, 0xde, 0xfe, + 0xcc, 0xe5, 0xce, 0x62, 0xdc, 0x9c, 0x04, 0x5e, 0x2b, 0x76, 0xf8, 0xc4, 0x0d, 0x12, 0xa9, 0x15, + 0x5e, 0xce, 0x5a, 0x1b, 0x0d, 0x6d, 0x3e, 0x97, 0xd1, 0x38, 0x49, 0x9d, 0xb1, 
0x54, 0xdf, 0xc8, + 0x52, 0xbb, 0xcd, 0xf2, 0x8f, 0x02, 0x18, 0xa7, 0x2e, 0xe3, 0xc1, 0x2c, 0x22, 0x1e, 0x7a, 0x17, + 0x8c, 0x49, 0xb0, 0xf0, 0xf9, 0xc8, 0xf5, 0xb9, 0xfc, 0xd6, 0xfc, 0x69, 0x0e, 0x97, 0x24, 0x74, + 0xe6, 0x73, 0xf4, 0x1e, 0xec, 0xc4, 0xe6, 0x8b, 0x79, 0x40, 0x78, 0x5c, 0xe6, 0x34, 0x87, 0x41, + 0x82, 0xc7, 0x02, 0x43, 0x26, 0x68, 0x6c, 0xe1, 0xc9, 0x3a, 0x0a, 0x16, 0x22, 0xba, 0x0f, 0x3a, + 0x9b, 0x38, 0xd4, 0x23, 0xb2, 0xd5, 0x77, 0x71, 0xa2, 0xa1, 0x0f, 0xa0, 0xfa, 0x3d, 0x8d, 0x82, + 0x11, 0x77, 0x22, 0xca, 0x9c, 0x60, 0x3e, 0x95, 0x6d, 0x57, 0x70, 0x45, 0xa0, 0xc3, 0x14, 0x44, + 0x1f, 0x26, 0x6e, 0x19, 0x2f, 0x5d, 0xf2, 0x52, 0x70, 0x59, 0xe0, 0x47, 0x29, 0xb7, 0x47, 0x60, + 0xae, 0xf9, 0xc5, 0x04, 0x8b, 0x92, 0xa0, 0x82, 0xab, 0x2b, 0xcf, 0x98, 0x64, 0x1b, 0xaa, 0x3e, + 0x9d, 0x11, 0xee, 0xbe, 0xa0, 0x23, 0x16, 0x12, 0x9f, 0x59, 0x25, 0xd9, 0x95, 0xff, 0x6f, 0xcd, + 0x7c, 0x7b, 0x31, 0xb9, 0xa4, 0x7c, 0x10, 0x12, 0x1f, 0x57, 0xd2, 0x10, 0xa1, 0x31, 0xf4, 0x11, + 0xdc, 0x59, 0xe5, 0x98, 0xd2, 0x39, 0x27, 0xcc, 0x32, 0xea, 0x5a, 0x03, 0xe1, 0x55, 0xea, 0xaf, + 0x24, 0xba, 0xe1, 0x28, 0xc9, 0x31, 0x0b, 0xea, 0x5a, 0x43, 0xc9, 0x1c, 0x25, 0x33, 0x26, 0x58, + 0x85, 0x01, 0x73, 0xd7, 0x58, 0xed, 0xfc, 0x07, 0x56, 0x69, 0xc8, 0x8a, 0xd5, 0x2a, 0x47, 0xc2, + 0xaa, 0x1c, 0xb3, 0x4a, 0xe1, 0x8c, 0xd5, 0xca, 0x31, 0x61, 0x55, 0x89, 0x59, 0xa5, 0x70, 0xc2, + 0xea, 0x08, 0x20, 0xa2, 0x8c, 0xf2, 0x91, 0x23, 0xce, 0xbe, 0x2a, 0x77, 0xc3, 0xfb, 0x5b, 0x8c, + 0x56, 0x23, 0xd4, 0xc4, 0xc2, 0xf9, 0xd4, 0xf5, 0x39, 0x36, 0xa2, 0x54, 0xdc, 0x9c, 0xc1, 0x3b, + 0xb7, 0x67, 0x70, 0x1f, 0x8c, 0x55, 0xd4, 0xe6, 0x0d, 0x2f, 0x82, 0xf6, 0xac, 0x33, 0x30, 0x15, + 0xa4, 0x83, 0xda, 0xeb, 0x9b, 0x6a, 0x76, 0xcb, 0xb5, 0x76, 0x11, 0x0a, 0x92, 0x78, 0xbb, 0x0c, + 0x90, 0x75, 0xde, 0x3e, 0x00, 0xc8, 0x8e, 0x47, 0x0c, 0x5f, 0x70, 0x71, 0xc1, 0x68, 0x3c, 0xcd, + 0x77, 0x71, 0xa2, 0x09, 0x7c, 0x4e, 0xfd, 0x19, 0x77, 0xe4, 0x10, 0x57, 0x70, 0xa2, 0xd9, 0xbf, + 0xaa, 0x00, 0x43, 0xd7, 0xa3, 0x03, 0x1a, 0xb9, 0x94, 0xbd, 0x9d, 0x6b, 0xfb, 0x19, 0x14, 0x99, + 0x5c, 0x33, 0xcc, 0x52, 0x65, 0x95, 0x07, 0x5b, 0xc7, 0x1b, 0xaf, 0xa1, 0x76, 0x5e, 0xd4, 0xc3, + 0xa9, 0x37, 0xfa, 0x1c, 0x0c, 0x9a, 0x2c, 0x18, 0x66, 0x69, 0x32, 0xf4, 0xe1, 0x56, 0x68, 0xba, + 0x82, 0x92, 0xe0, 0x2c, 0x02, 0x7d, 0x09, 0xe0, 0xa4, 0x6d, 0x63, 0x56, 0x5e, 0xc6, 0xef, 0xbe, + 0xb9, 0xb3, 0x49, 0x82, 0xb5, 0x18, 0xfb, 0x47, 0x05, 0xca, 0xf2, 0x5b, 0xba, 0x84, 0x4f, 0x1c, + 0x1a, 0xa1, 0x4f, 0x37, 0x9e, 0x10, 0x7b, 0x2b, 0xd9, 0xba, 0x73, 0x73, 0xed, 0xe9, 0x40, 0x90, + 0x5f, 0x7b, 0x2c, 0xa4, 0x9c, 0x6d, 0x33, 0x4d, 0x82, 0xb1, 0x62, 0x37, 0x20, 0x2f, 0x1f, 0x02, + 0x1d, 0xd4, 0xce, 0xd3, 0x78, 0x42, 0x7a, 0x9d, 0xa7, 0xf1, 0x84, 0x60, 0xb1, 0xfc, 0x05, 0x80, + 0x3b, 0xa6, 0x66, 0xff, 0xa6, 0x88, 0xb1, 0x22, 0x53, 0x31, 0x55, 0x0c, 0x3d, 0x80, 0x22, 0xe3, + 0x34, 0x1c, 0x79, 0x4c, 0x92, 0xd3, 0xb0, 0x2e, 0xd4, 0x2e, 0x13, 0xa5, 0x2f, 0x16, 0xfe, 0x24, + 0x2d, 0x2d, 0x64, 0xf4, 0x10, 0x4a, 0x8c, 0x93, 0x88, 0x0b, 0xef, 0x78, 0x63, 0x16, 0xa5, 0xde, + 0x65, 0xe8, 0x7f, 0xa0, 0x53, 0x7f, 0x3a, 0x92, 0x07, 0x26, 0x0c, 0x05, 0xea, 0x4f, 0xbb, 0x0c, + 0xed, 0x42, 0x69, 0x16, 0x05, 0x8b, 0xd0, 0xf5, 0x67, 0x56, 0xa1, 0xae, 0x35, 0x0c, 0xbc, 0xd2, + 0x51, 0x15, 0xd4, 0xf1, 0x52, 0x6e, 0xad, 0x12, 0x56, 0xc7, 0x4b, 0x91, 0x3d, 0x22, 0xfe, 0x8c, + 0x8a, 0x24, 0xc5, 0x38, 0xbb, 0xd4, 0xbb, 0xcc, 0xfe, 0x5d, 0x81, 0xc2, 0x91, 0xb3, 0xf0, 0x2f, + 0x51, 0x0d, 0x76, 0x3c, 0xd7, 0x1f, 0x89, 0x4b, 0x92, 0x71, 0x36, 0x3c, 0xd7, 0x17, 0xd3, 0xd9, + 0x65, 
0xd2, 0x4e, 0xae, 0x56, 0xf6, 0xe4, 0xf5, 0xf1, 0xc8, 0x55, 0x62, 0x7f, 0x92, 0x74, 0x42, + 0x93, 0x9d, 0xd8, 0xdb, 0xea, 0x84, 0xac, 0xd2, 0xec, 0xf8, 0x93, 0x60, 0xea, 0xfa, 0xb3, 0xac, + 0x0d, 0xe2, 0x69, 0x97, 0x9f, 0x56, 0xc6, 0x52, 0xb6, 0x5b, 0x50, 0x4a, 0xbd, 0xb6, 0xee, 0xe6, + 0x77, 0x7d, 0xf1, 0xf2, 0x6e, 0x3c, 0xb7, 0xaa, 0xfd, 0x8b, 0x02, 0x15, 0x99, 0x9d, 0x4e, 0xdf, + 0xe6, 0x2d, 0xda, 0x07, 0x7d, 0x22, 0xaa, 0xa6, 0x97, 0xe8, 0xfe, 0xbf, 0x7f, 0x72, 0x32, 0xc5, + 0x89, 0x6f, 0xbb, 0xfe, 0xf2, 0xba, 0xa6, 0xbc, 0xba, 0xae, 0x29, 0x7f, 0x5f, 0xd7, 0x94, 0x1f, + 0x6e, 0x6a, 0xb9, 0x57, 0x37, 0xb5, 0xdc, 0x9f, 0x37, 0xb5, 0xdc, 0x73, 0x3d, 0xfe, 0xdb, 0x35, + 0xd6, 0xe5, 0xff, 0xa5, 0x27, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x0e, 0x67, 0xfa, 0x2f, 0x95, + 0x09, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { @@ -833,7 +1194,7 @@ func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *TimeSeries) Marshal() (dAtA []byte, err error) { +func (m *Histogram) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -843,20 +1204,59 @@ func (m *TimeSeries) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var j2 int + dAtA4 := make([]byte, len(m.PositiveDeltas)*10) + for _, num := range m.PositiveDeltas { + x3 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x3 >= 1<<7 { + dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80) + j2++ + x3 >>= 7 + } + dAtA4[j2] = uint8(x3) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA4[:j2]) + i = encodeVarintTypes(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -864,13 +1264,42 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x5a } } - if len(m.Samples) > 0 { - for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if 
len(m.NegativeDeltas) > 0 { + var j6 int + dAtA8 := make([]byte, len(m.NegativeDeltas)*10) + for _, num := range m.NegativeDeltas { + x7 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x7 >= 1<<7 { + dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) + j6++ + x7 >>= 7 + } + dAtA8[j6] = uint8(x7) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA8[:j6]) + i = encodeVarintTypes(dAtA, i, uint64(j6)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -878,27 +1307,210 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x42 } } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.ZeroCount != nil { + { + size := m.ZeroCount.Size() + i -= size + if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + if m.Count != nil { + { + size := m.Count.Size() + i -= size + if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { + return 0, err } - i-- - dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { +func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Length != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1176,6 +1788,119 @@ func (m *Exemplar) Size() (n int) { return n } +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += m.Count.Size() + } + if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozTypes(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != nil { + n += m.ZeroCount.Size() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if 
len(m.PositiveCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + sovTypes(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + return n +} + +func (m *Histogram_CountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozTypes(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovTypes(uint64(m.Length)) + } + return n +} + func (m *TimeSeries) Size() (n int) { if m == nil { return 0 @@ -1200,6 +1925,12 @@ func (m *TimeSeries) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } return n } @@ -1665,7 +2396,7 @@ func (m *Exemplar) Unmarshal(dAtA []byte) error { } return nil } -func (m *TimeSeries) Unmarshal(dAtA []byte) error { +func (m *Histogram) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1688,17 +2419,17 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) } - var msglen int + var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1708,31 +2439,39 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes + m.Count = &Histogram_CountInt{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + 
var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF } - var msglen int + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1742,31 +2481,29 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Samples = append(m.Samples, Sample{}) - if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) } - var msglen int + var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1776,26 +2513,669 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes + m.ZeroCount = &Histogram_ZeroCountInt{v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Exemplars = append(m.Exemplars, Exemplar{}) + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, &BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = 
append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, &BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } 
+ if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, Exemplar{}) if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto index f62ff7552bf..0fea825508b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto @@ -62,6 +62,72 @@ message Exemplar { int64 timestamp = 3; } +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +message Histogram { + enum ResetHint { + UNKNOWN = 0; // Need to test for a counter reset explicitly. + YES = 1; // This is the 1st histogram after a counter reset. + NO = 2; // There was no counter reset between this and the previous Histogram. + GAUGE = 3; // This is a gauge histogram where counter resets don't happen. + } + + oneof count { // Count of observations in the histogram. + uint64 count_int = 1; + double count_float = 2; + } + double sum = 3; // Sum of observations in the histogram. + // The schema defines the bucket schema. 
Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + sint32 schema = 4; + double zero_threshold = 5; // Breadth of the zero bucket. + oneof zero_count { // Count in zero bucket. + uint64 zero_count_int = 6; + double zero_count_float = 7; + } + + // Negative Buckets. + repeated BucketSpan negative_spans = 8; + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_counts = 10; // Absolute count of each bucket. + + // Positive Buckets. + repeated BucketSpan positive_spans = 11; + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_counts = 13; // Absolute count of each bucket. + + ResetHint reset_hint = 14; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 15; +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} + + // TimeSeries represents samples and labels for a single time series. message TimeSeries { // Labels have to be sorted by label names and without duplicated label names. @@ -69,6 +135,7 @@ message TimeSeries { repeated thanos.Label labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel"]; repeated Sample samples = 2 [(gogoproto.nullable) = false]; repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; + repeated Histogram histograms = 4 [(gogoproto.nullable) = false]; } // Matcher specifies a rule, which can match or set of labels or not. 
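
For reviewers: the bucket-schema and span/delta conventions documented in the new Histogram and BucketSpan messages above can be sanity-checked with a small, self-contained Go sketch. This is an illustration only, assuming exactly the semantics stated in the proto comments (the upper bound of bucket i is 2^(i * 2^-schema), a span's offset is the starting index for the first span and the gap to the previous span otherwise, and deltas accumulate into absolute counts); the span, bucketUpperBound, and expandDeltas names are invented for the example and do not exist in the vendored packages.

package main

import (
	"fmt"
	"math"
)

// span mirrors the BucketSpan message: Offset is the gap to the previous
// span (or the starting bucket index for the first span), Length is the
// number of consecutive buckets in the span.
type span struct {
	Offset int32
	Length uint32
}

// bucketUpperBound returns the upper boundary of the bucket with the given
// index under the given schema: each boundary is the previous one times
// 2^(2^-schema), so the bound of bucket i is 2^(i * 2^-schema).
func bucketUpperBound(schema, index int32) float64 {
	return math.Exp2(float64(index) * math.Exp2(-float64(schema)))
}

// expandDeltas turns spans plus per-bucket deltas (one delta per bucket,
// "delta compared to the previous bucket, or to zero for the first one")
// into absolute counts keyed by absolute bucket index.
func expandDeltas(spans []span, deltas []int64) map[int32]int64 {
	counts := make(map[int32]int64)
	var idx int32
	var count int64
	d := 0
	for _, s := range spans {
		idx += s.Offset
		for j := uint32(0); j < s.Length; j++ {
			count += deltas[d]
			counts[idx] = count
			d++
			idx++
		}
	}
	return counts
}

func main() {
	// Schema 0 means plain base-2 buckets: (1,2], (2,4], (4,8], ...
	fmt.Println(bucketUpperBound(0, 1), bucketUpperBound(0, 2), bucketUpperBound(0, 3))
	// Two spans: buckets 1..2, then a gap of two empty buckets, then buckets 5..6.
	spans := []span{{Offset: 1, Length: 2}, {Offset: 2, Length: 2}}
	deltas := []int64{3, -1, 1, 2} // absolute counts 3, 2, 3, 5
	fmt.Println(expandDeltas(spans, deltas))
}

Under those assumptions the sketch prints upper bounds 2, 4, 8 for schema 0 and absolute counts of 3, 2, 3, 5 for bucket indices 1, 2, 5, 6, which is the compact spans-plus-deltas layout the BucketSpan comment motivates.
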
@@ -104,6 +171,7 @@ message Chunk { enum Encoding { UNKNOWN = 0; XOR = 1; + HISTOGRAM = 2; } Encoding type = 3; bytes data = 4; diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go index 271a785bf51..a87a135ba0a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go @@ -63,15 +63,18 @@ func (PartialResponseStrategy) EnumDescriptor() ([]byte, []int) { type Chunk_Encoding int32 const ( - Chunk_XOR Chunk_Encoding = 0 + Chunk_XOR Chunk_Encoding = 0 + Chunk_HISTOGRAM Chunk_Encoding = 1 ) var Chunk_Encoding_name = map[int32]string{ 0: "XOR", + 1: "HISTOGRAM", } var Chunk_Encoding_value = map[string]int32{ - "XOR": 0, + "XOR": 0, + "HISTOGRAM": 1, } func (x Chunk_Encoding) String() string { @@ -287,41 +290,42 @@ func init() { func init() { proto.RegisterFile("store/storepb/types.proto", fileDescriptor_121fba57de02d8e0) } var fileDescriptor_121fba57de02d8e0 = []byte{ - // 536 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x4f, 0x6f, 0xd3, 0x4e, - 0x10, 0xf5, 0xda, 0x8e, 0x93, 0xcc, 0xaf, 0x3f, 0x64, 0x96, 0x0a, 0xdc, 0x1e, 0x9c, 0xc8, 0x08, - 0x11, 0x55, 0xaa, 0x2d, 0x15, 0x8e, 0x5c, 0x12, 0x94, 0x1b, 0xb4, 0x74, 0x1b, 0x09, 0xd4, 0x0b, - 0xda, 0xb8, 0x2b, 0xdb, 0x6a, 0xfc, 0x47, 0xf6, 0xba, 0x24, 0xdf, 0x02, 0xc4, 0x9d, 0xcf, 0x93, - 0x63, 0x8f, 0x88, 0x43, 0x04, 0xc9, 0x17, 0x41, 0x1e, 0x3b, 0x40, 0xa4, 0x5c, 0xac, 0xf1, 0x7b, - 0x6f, 0x66, 0x76, 0xde, 0xce, 0xc2, 0x51, 0x21, 0xd3, 0x5c, 0x78, 0xf8, 0xcd, 0xa6, 0x9e, 0x5c, - 0x64, 0xa2, 0x70, 0xb3, 0x3c, 0x95, 0x29, 0x35, 0x64, 0xc8, 0x93, 0xb4, 0x38, 0x3e, 0x0c, 0xd2, - 0x20, 0x45, 0xc8, 0xab, 0xa2, 0x9a, 0x3d, 0x6e, 0x12, 0x67, 0x7c, 0x2a, 0x66, 0xbb, 0x89, 0xce, - 0x1d, 0xb4, 0x5e, 0x87, 0x65, 0x72, 0x4b, 0x4f, 0x40, 0xaf, 0x70, 0x8b, 0xf4, 0xc9, 0xe0, 0xc1, - 0xd9, 0x63, 0xb7, 0x2e, 0xe8, 0x22, 0xe9, 0x8e, 0x13, 0x3f, 0xbd, 0x89, 0x92, 0x80, 0xa1, 0x86, - 0x52, 0xd0, 0x6f, 0xb8, 0xe4, 0x96, 0xda, 0x27, 0x83, 0x03, 0x86, 0x31, 0xb5, 0x40, 0x0f, 0x79, - 0x11, 0x5a, 0x5a, 0x9f, 0x0c, 0xf4, 0x91, 0xbe, 0x5c, 0xf5, 0x08, 0x43, 0xc4, 0x79, 0x04, 0x9d, - 0x6d, 0x3e, 0x6d, 0x83, 0xf6, 0xe1, 0x82, 0x99, 0x8a, 0xf3, 0x8d, 0x80, 0x71, 0x25, 0xf2, 0x48, - 0x14, 0xd4, 0x07, 0x03, 0x4f, 0x56, 0x58, 0xa4, 0xaf, 0x0d, 0xfe, 0x3b, 0xfb, 0x7f, 0xdb, 0xfb, - 0x4d, 0x85, 0x8e, 0x5e, 0x2d, 0x57, 0x3d, 0xe5, 0xc7, 0xaa, 0xf7, 0x32, 0x88, 0x64, 0x58, 0x4e, - 0x5d, 0x3f, 0x8d, 0xbd, 0x5a, 0x70, 0x1a, 0xa5, 0x4d, 0xe4, 0x65, 0xb7, 0x81, 0xb7, 0x33, 0xa4, - 0x7b, 0x8d, 0xd9, 0xac, 0x29, 0x4d, 0x3d, 0x30, 0xfc, 0x6a, 0x94, 0xc2, 0x52, 0xb1, 0xc9, 0xc3, - 0x6d, 0x93, 0x61, 0x10, 0xe4, 0x38, 0x24, 0x9e, 0x59, 0x61, 0x8d, 0xcc, 0xf9, 0xaa, 0x42, 0xf7, - 0x0f, 0x47, 0x8f, 0xa0, 0x13, 0x47, 0xc9, 0x47, 0x19, 0xc5, 0xb5, 0x43, 0x1a, 0x6b, 0xc7, 0x51, - 0x32, 0x89, 0x62, 0x81, 0x14, 0x9f, 0xd7, 0x94, 0xda, 0x50, 0x7c, 0x8e, 0x54, 0x0f, 0xb4, 0x9c, - 0x7f, 0x42, 0x4b, 0xfe, 0x19, 0x0b, 0x2b, 0xb2, 0x8a, 0xa1, 0x4f, 0xa1, 0xe5, 0xa7, 0x65, 0x22, - 0x2d, 0x7d, 0x9f, 0xa4, 0xe6, 0xaa, 0x2a, 0x45, 0x19, 0x5b, 0xad, 0xbd, 0x55, 0x8a, 0x32, 0xae, - 0x04, 0x71, 0x94, 0x58, 0xc6, 0x5e, 0x41, 0x1c, 0x25, 0x28, 0xe0, 0x73, 0xab, 0xbd, 0x5f, 0xc0, - 0xe7, 0xf4, 0x39, 0xb4, 0xb1, 0x97, 0xc8, 0xad, 0xce, 0x3e, 0xd1, 0x96, 0x75, 0xbe, 0x10, 0x38, - 0x40, 0x63, 0xdf, 0x72, 0xe9, 0x87, 0x22, 0xa7, 0xa7, 0x3b, 0x6b, 0x73, 
0xb4, 0x73, 0x75, 0x8d, - 0xc6, 0x9d, 0x2c, 0x32, 0xf1, 0x77, 0x73, 0x12, 0xde, 0x18, 0xd5, 0x65, 0x18, 0xd3, 0x43, 0x68, - 0xdd, 0xf1, 0x59, 0x29, 0xd0, 0xa7, 0x2e, 0xab, 0x7f, 0x9c, 0x01, 0xe8, 0x55, 0x1e, 0x35, 0x40, - 0x1d, 0x5f, 0x9a, 0x4a, 0xb5, 0x39, 0xe7, 0xe3, 0x4b, 0x93, 0x54, 0x00, 0x1b, 0x9b, 0x2a, 0x02, - 0x6c, 0x6c, 0x6a, 0x27, 0x2e, 0x3c, 0x79, 0xc7, 0x73, 0x19, 0xf1, 0x19, 0x13, 0x45, 0x96, 0x26, - 0x85, 0xb8, 0x92, 0x39, 0x97, 0x22, 0x58, 0xd0, 0x0e, 0xe8, 0xef, 0x87, 0xec, 0xdc, 0x54, 0x68, - 0x17, 0x5a, 0xc3, 0xd1, 0x05, 0x9b, 0x98, 0x64, 0xf4, 0x6c, 0xf9, 0xcb, 0x56, 0x96, 0x6b, 0x9b, - 0xdc, 0xaf, 0x6d, 0xf2, 0x73, 0x6d, 0x93, 0xcf, 0x1b, 0x5b, 0xb9, 0xdf, 0xd8, 0xca, 0xf7, 0x8d, - 0xad, 0x5c, 0xb7, 0x9b, 0xe7, 0x35, 0x35, 0xf0, 0x81, 0xbc, 0xf8, 0x1d, 0x00, 0x00, 0xff, 0xff, - 0x13, 0xe7, 0xb3, 0x25, 0x76, 0x03, 0x00, 0x00, + // 553 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0xcf, 0x6e, 0xd3, 0x4e, + 0x10, 0xc7, 0xbd, 0xb6, 0xe3, 0x24, 0xf3, 0x6b, 0x7f, 0x32, 0xab, 0x0a, 0xdc, 0x1e, 0x9c, 0xc8, + 0x08, 0x11, 0x55, 0xaa, 0x2d, 0x15, 0x8e, 0x5c, 0x12, 0x14, 0x01, 0x12, 0x6d, 0xe8, 0x26, 0x12, + 0xa8, 0x17, 0xb4, 0x71, 0x57, 0xb6, 0xd5, 0xf8, 0x8f, 0xbc, 0x6b, 0x48, 0x1e, 0x80, 0x3b, 0x88, + 0x3b, 0xcf, 0x93, 0x63, 0x8f, 0x88, 0x43, 0x04, 0xc9, 0x8b, 0x20, 0xaf, 0x1d, 0x20, 0x52, 0x2e, + 0xd6, 0x78, 0x3e, 0xdf, 0x99, 0xd9, 0x99, 0x9d, 0x85, 0x63, 0x2e, 0xd2, 0x9c, 0x79, 0xf2, 0x9b, + 0x4d, 0x3d, 0xb1, 0xc8, 0x18, 0x77, 0xb3, 0x3c, 0x15, 0x29, 0x36, 0x44, 0x48, 0x93, 0x94, 0x9f, + 0x1c, 0x05, 0x69, 0x90, 0x4a, 0x97, 0x57, 0x5a, 0x15, 0x3d, 0xa9, 0x03, 0x67, 0x74, 0xca, 0x66, + 0xbb, 0x81, 0xce, 0x27, 0x04, 0x8d, 0xe7, 0x61, 0x91, 0xdc, 0xe2, 0x53, 0xd0, 0x4b, 0x60, 0xa1, + 0x2e, 0xea, 0xfd, 0x7f, 0x7e, 0xdf, 0xad, 0x32, 0xba, 0x12, 0xba, 0xc3, 0xc4, 0x4f, 0x6f, 0xa2, + 0x24, 0x20, 0x52, 0x83, 0x31, 0xe8, 0x37, 0x54, 0x50, 0x4b, 0xed, 0xa2, 0xde, 0x01, 0x91, 0x36, + 0xb6, 0x40, 0x0f, 0x29, 0x0f, 0x2d, 0xad, 0x8b, 0x7a, 0xfa, 0x40, 0x5f, 0xae, 0x3a, 0x88, 0x48, + 0x8f, 0xe3, 0x40, 0x6b, 0x1b, 0x8f, 0x9b, 0xa0, 0xbd, 0x1b, 0x11, 0x53, 0xc1, 0x87, 0xd0, 0x7e, + 0xf9, 0x6a, 0x3c, 0x19, 0xbd, 0x20, 0xfd, 0x0b, 0x13, 0x39, 0xdf, 0x10, 0x18, 0x63, 0x96, 0x47, + 0x8c, 0x63, 0x1f, 0x0c, 0x79, 0x52, 0x6e, 0xa1, 0xae, 0xd6, 0xfb, 0xef, 0xfc, 0x70, 0x7b, 0x94, + 0xd7, 0xa5, 0x77, 0xf0, 0x6c, 0xb9, 0xea, 0x28, 0x3f, 0x56, 0x9d, 0xa7, 0x41, 0x24, 0xc2, 0x62, + 0xea, 0xfa, 0x69, 0xec, 0x55, 0x82, 0xb3, 0x28, 0xad, 0x2d, 0x2f, 0xbb, 0x0d, 0xbc, 0x9d, 0xa6, + 0xdd, 0x6b, 0x19, 0x4d, 0xea, 0xd4, 0xd8, 0x03, 0xc3, 0x2f, 0x3b, 0xe3, 0x96, 0x2a, 0x8b, 0xdc, + 0xdb, 0x16, 0xe9, 0x07, 0x41, 0x2e, 0x7b, 0x96, 0x2d, 0x28, 0xa4, 0x96, 0x39, 0x5f, 0x55, 0x68, + 0xff, 0x61, 0xf8, 0x18, 0x5a, 0x71, 0x94, 0xbc, 0x17, 0x51, 0x5c, 0x0d, 0x4c, 0x23, 0xcd, 0x38, + 0x4a, 0x26, 0x51, 0xcc, 0x24, 0xa2, 0xf3, 0x0a, 0xa9, 0x35, 0xa2, 0x73, 0x89, 0x3a, 0xa0, 0xe5, + 0xf4, 0xa3, 0x9c, 0xd0, 0x3f, 0x6d, 0xc9, 0x8c, 0xa4, 0x24, 0xf8, 0x21, 0x34, 0xfc, 0xb4, 0x48, + 0x84, 0xa5, 0xef, 0x93, 0x54, 0xac, 0xcc, 0xc2, 0x8b, 0xd8, 0x6a, 0xec, 0xcd, 0xc2, 0x8b, 0xb8, + 0x14, 0xc4, 0x51, 0x62, 0x19, 0x7b, 0x05, 0x71, 0x94, 0x48, 0x01, 0x9d, 0x5b, 0xcd, 0xfd, 0x02, + 0x3a, 0xc7, 0x8f, 0xa1, 0x29, 0x6b, 0xb1, 0xdc, 0x6a, 0xed, 0x13, 0x6d, 0xa9, 0xf3, 0x05, 0xc1, + 0x81, 0x1c, 0xec, 0x05, 0x15, 0x7e, 0xc8, 0x72, 0x7c, 0xb6, 0xb3, 0x45, 0xc7, 0x3b, 0x57, 0x57, + 0x6b, 0xdc, 0xc9, 0x22, 0x63, 0x7f, 0x17, 0x29, 0xa1, 0xf5, 0xa0, 0xda, 0x44, 0xda, 0xf8, 0x08, + 
0x1a, 0x1f, 0xe8, 0xac, 0x60, 0x72, 0x4e, 0x6d, 0x52, 0xfd, 0x38, 0x3d, 0xd0, 0xcb, 0x38, 0x6c, + 0x80, 0x3a, 0xbc, 0x32, 0x95, 0x72, 0x91, 0x2e, 0x87, 0x57, 0x26, 0x2a, 0x1d, 0x64, 0x68, 0xaa, + 0xd2, 0x41, 0x86, 0xa6, 0x76, 0xea, 0xc2, 0x83, 0x37, 0x34, 0x17, 0x11, 0x9d, 0x11, 0xc6, 0xb3, + 0x34, 0xe1, 0x6c, 0x2c, 0x72, 0x2a, 0x58, 0xb0, 0xc0, 0x2d, 0xd0, 0xdf, 0xf6, 0xc9, 0xa5, 0xa9, + 0xe0, 0x36, 0x34, 0xfa, 0x83, 0x11, 0x99, 0x98, 0x68, 0xf0, 0x68, 0xf9, 0xcb, 0x56, 0x96, 0x6b, + 0x1b, 0xdd, 0xad, 0x6d, 0xf4, 0x73, 0x6d, 0xa3, 0xcf, 0x1b, 0x5b, 0xb9, 0xdb, 0xd8, 0xca, 0xf7, + 0x8d, 0xad, 0x5c, 0x37, 0xeb, 0xe7, 0x36, 0x35, 0xe4, 0x83, 0x79, 0xf2, 0x3b, 0x00, 0x00, 0xff, + 0xff, 0x44, 0x9d, 0x95, 0xa2, 0x86, 0x03, 0x00, 0x00, } func (m *Chunk) Marshal() (dAtA []byte, err error) { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto index 3739e23b8f4..67c93fa52ed 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto @@ -23,6 +23,7 @@ option (gogoproto.goproto_sizecache_all) = false; message Chunk { enum Encoding { XOR = 0; + HISTOGRAM = 1; } Encoding type = 1; bytes data = 2; diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index b3ce3df3032..d896edc9968 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -91,7 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/lint/golint - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/tools/cmd/goimports - go get -u github.com/rakyll/embedmd + go install golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index e5e4b4368c1..11e31f421c5 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. 
func Version() string { - return "0.23.0" + return "0.24.0" } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go index 49fde3d8c82..fb3c19d6b64 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -28,6 +28,7 @@ var ( ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless) ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) ) @@ -70,6 +71,14 @@ var ( Aggregation: view.Count(), } + ClientStartedRPCsView = &view.View{ + Measure: ClientStartedRPCs, + Name: "grpc.io/client/started_rpcs", + Description: "Number of started client RPCs.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: view.Count(), + } + ClientSentMessagesPerRPCView = &view.View{ Measure: ClientSentMessagesPerRPC, Name: "grpc.io/client/sent_messages_per_rpc", diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go index b2059824a85..fe0e971086e 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -27,6 +27,7 @@ var ( ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) + ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless) ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) ) @@ -73,6 +74,14 @@ var ( Aggregation: view.Count(), } + ServerStartedRPCsView = &view.View{ + Measure: ServerStartedRPCs, + Name: "grpc.io/server/started_rpcs", + Description: "Number of started server RPCs.", + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: view.Count(), + } + ServerReceivedMessagesPerRPCView = &view.View{ Name: "grpc.io/server/received_messages_per_rpc", Description: "Distribution of messages received count per RPC, by method.", diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go index 89cac9c4ec0..9cb27320ca1 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -82,8 +82,10 @@ func methodName(fullname string) string { // statsHandleRPC processes the RPC events. func statsHandleRPC(ctx context.Context, s stats.RPCStats) { switch st := s.(type) { - case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: + case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: // do nothing for client + case *stats.Begin: + handleRPCBegin(ctx, st) case *stats.OutPayload: handleRPCOutPayload(ctx, st) case *stats.InPayload: @@ -95,6 +97,25 @@ func statsHandleRPC(ctx context.Context, s stats.RPCStats) { } } +func handleRPCBegin(ctx context.Context, s *stats.Begin) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + } + + if s.IsClient() { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ClientStartedRPCs.M(1))) + } else { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ServerStartedRPCs.M(1))) + } +} + func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { d, ok := ctx.Value(rpcDataKey).(*rpcData) if !ok { diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index c7ea6423572..f7c8434be06 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -31,14 +31,14 @@ import ( // Handler is an http.Handler wrapper to instrument your HTTP server with // OpenCensus. It supports both stats and tracing. // -// Tracing +// # Tracing // // This handler is aware of the incoming request's span, reading it from request // headers as configured using the Propagation field. // The extracted span can be accessed from the incoming request's // context. // -// span := trace.FromContext(r.Context()) +// span := trace.FromContext(r.Context()) // // The server span will be automatically ended at the end of ServeHTTP. 
type Handler struct { @@ -224,7 +224,9 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) { } // wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional +// +// ResponseWriter and only implements the same combination of additional +// // interfaces as the original. // This implementation is based on https://github.com/felixge/httpsnoop. func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go index 00d473ee029..31477a464fd 100644 --- a/vendor/go.opencensus.io/stats/doc.go +++ b/vendor/go.opencensus.io/stats/doc.go @@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording. OpenCensus allows users to create typed measures, record measurements, aggregate the collected data, and export the aggregated data. -Measures +# Measures A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures @@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then create views and collect and break down measures by the tags they are interested in. -Recording measurements +# Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms @@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. -Exemplars +# Exemplars For a given recorded measurement, the associated exemplar is a diagnostic map that gives more information about the measurement. @@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen When exported to a supporting back end, you should be able to easily navigate to example traces that fell into each bucket in the Distribution. - */ package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 36935e629b6..436dc791f83 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -21,5 +21,11 @@ import ( // DefaultRecorder will be called for each Record call. var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) +// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but +// avoids interface{} conversion. +// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, +// but is interface{} here to avoid import loops +var MeasurementRecorder interface{} + // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index 2b97283462e..8b5b99803ce 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -86,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions { return o } +type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) + // Record records one or multiple measurements with the same context at once. 
// If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { - RecordWithOptions(ctx, WithMeasurements(ms...)) + // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality + // (RecordOptions) we can reduce some allocations to speed up this hot path + if len(ms) == 0 { + return + } + recorder := internal.MeasurementRecorder.(measurementRecorder) + record := false + for _, m := range ms { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return + } + recorder(tag.FromContext(ctx), ms, nil) + return } // RecordWithTags records one or multiple measurements at once. diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index 748bd568cda..61f72d20da3 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -90,9 +90,9 @@ func Sum() *Aggregation { // // If len(bounds) >= 2 then the boundaries for bucket index i are: // -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length // // If len(bounds) is 0 then there is no histogram associated with the // distribution. There will be a single bucket with boundaries diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index ac22c93a2b5..bcd6e08c748 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -59,8 +59,15 @@ func (c *collector) clearRows() { // encodeWithKeys encodes the map by using values // only associated with the keys provided. func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + // Compute the buffer length we will need ahead of time to avoid resizing later + reqLen := 0 + for _, k := range keys { + s, _ := m.Value(k) + // We will store each key + its length + reqLen += len(s) + 1 + } vb := &tagencoding.Values{ - Buffer: make([]byte, len(keys)), + Buffer: make([]byte, reqLen), } for _, k := range keys { v, _ := m.Value(k) diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go index 7bbedfe1ff2..60bf0e39254 100644 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -34,7 +34,7 @@ // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. // -// Exporting +// # Exporting // // Collected and aggregated data can be exported to a metric collection // backend by registering its exporter. 
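For orientation, a small usage sketch of the OpenCensus API path touched by the `Record` fast path above: measurements for a measure with no registered view are now dropped before any allocation, while measures with a subscribed view are forwarded to the worker as before. The measure and view names are illustrative only and are not taken from this patch.

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var requests = stats.Int64("example.org/measures/requests", "Number of requests.", stats.UnitDimensionless)

func main() {
	ctx := context.Background()

	// No view is registered for the measure yet, so this hits the
	// early-return branch in the optimized Record.
	stats.Record(ctx, requests.M(1))

	if err := view.Register(&view.View{
		Name:        "example.org/views/request_count",
		Measure:     requests,
		Aggregation: view.Count(),
	}); err != nil {
		log.Fatal(err)
	}

	// The measure now has a subscribed view, so the measurement is recorded.
	stats.Record(ctx, requests.M(1))
}
```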
diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 6e8d18b7f6d..6a79cd8a34c 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -33,6 +33,7 @@ func init() { defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record + internal.MeasurementRecorder = recordMeasurement } type measureRef struct { @@ -199,11 +200,21 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { defaultWorker.Record(tags, ms, attachments) } +func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { + defaultWorker.recordMeasurement(tags, ms, attachments) +} + // Record records a set of measurements ms associated with the given tags and attachments. func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) +} + +// recordMeasurement records a set of measurements ms associated with the given tags and attachments. +// This is the same as Record but without an interface{} type to avoid allocations +func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { req := &recordReq{ tm: tags, - ms: ms.([]stats.Measurement), + ms: ms, attachments: attachments, t: time.Now(), } @@ -221,6 +232,11 @@ func SetReportingPeriod(d time.Duration) { defaultWorker.SetReportingPeriod(d) } +// Stop stops the default worker. +func Stop() { + defaultWorker.Stop() +} + // SetReportingPeriod sets the interval between reporting aggregated views in // the program. If duration is less than or equal to zero, it enables the // default behavior. @@ -281,7 +297,7 @@ func (w *worker) start() { case <-w.quit: w.timer.Stop() close(w.c) - w.done <- true + close(w.done) return } } @@ -290,8 +306,11 @@ func (w *worker) start() { func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) - - w.quit <- true + select { + case <-w.quit: + default: + close(w.quit) + } <-w.done } diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go index b34d95e34a2..8fb17226fe3 100644 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package tag diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go index 83adbce56b7..e28cf13cde9 100644 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package tag diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go index 04b1ee4f38e..7a1616a55c5 100644 --- a/vendor/go.opencensus.io/trace/doc.go +++ b/vendor/go.opencensus.io/trace/doc.go @@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing. The following assumes a basic familiarity with OpenCensus concepts. See http://opencensus.io - -Exporting Traces +# Exporting Traces To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. 
- trace.RegisterExporter(exporter) + trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) Be careful about using trace.AlwaysSample in a production application with significant traffic: a new trace will be started and exported for every request. -Adding Spans to a Trace +# Adding Spans to a Trace A trace consists of a tree of spans. In Go, the current span is carried in a context.Context. @@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go index 908c2497ed5..80095a5f6c0 100644 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -44,7 +44,7 @@ func (lm lruMap) len() int { } func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, len(lm.cacheKeys)) + keys := make([]interface{}, 0, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go index b7d8aaf2847..b8fc1e495a9 100644 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.11 // +build go1.11 package trace diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go index e25419859c0..da488fc8740 100644 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.11 // +build !go1.11 package trace diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 4c037f1d8e0..fd91e4162da 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -164,10 +164,10 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } var bw bodyWrapper - // if request body is nil we don't want to mutate the body as it will affect - // the identity of it in an unforeseeable way because we assert ReadCloser - // fulfills a certain interface and it is indeed nil. - if r.Body != nil { + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
+ if r.Body != nil && r.Body != http.NoBody { bw.ReadCloser = r.Body bw.record = readRecordFunc r.Body = &bw @@ -180,7 +180,13 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } - rww := &respWriterWrapper{ResponseWriter: w, record: writeRecordFunc, ctx: ctx, props: h.propagators} + rww := &respWriterWrapper{ + ResponseWriter: w, + record: writeRecordFunc, + ctx: ctx, + props: h.propagators, + statusCode: 200, // default status code in case the Handler doesn't write anything + } // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -230,10 +236,9 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, if wrote > 0 { attributes = append(attributes, WroteBytesKey.Int64(wrote)) } - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...) - span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer)) - } + attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...) + span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer)) + if werr != nil && werr != io.EOF { attributes = append(attributes, WriteErrorKey.String(werr.Error())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 6ef983af4cb..822491db06e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.36.4" + return "0.37.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go index 5c8260ceb6f..330a14c2f64 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go @@ -22,6 +22,8 @@ import ( ) // InstrumentProvider provides access to individual instruments. +// +// Warning: methods may be added to this interface in minor releases. type InstrumentProvider interface { // Counter creates an instrument for recording increasing values. Counter(name string, opts ...instrument.Option) (Counter, error) @@ -34,9 +36,11 @@ type InstrumentProvider interface { } // Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. type Counter interface { - // Observe records the state of the instrument to be x. The value of x is - // assumed to be the exact Counter value to record. + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -47,9 +51,11 @@ type Counter interface { } // UpDownCounter is an instrument that records increasing or decreasing values. 
+// +// Warning: methods may be added to this interface in minor releases. type UpDownCounter interface { - // Observe records the state of the instrument to be x. The value of x is - // assumed to be the exact UpDownCounter value to record. + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -60,6 +66,8 @@ type UpDownCounter interface { } // Gauge is an instrument that records independent readings. +// +// Warning: methods may be added to this interface in minor releases. type Gauge interface { // Observe records the state of the instrument to be x. // diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go index b07409c7931..4fce9963c3b 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go @@ -22,6 +22,8 @@ import ( ) // InstrumentProvider provides access to individual instruments. +// +// Warning: methods may be added to this interface in minor releases. type InstrumentProvider interface { // Counter creates an instrument for recording increasing values. Counter(name string, opts ...instrument.Option) (Counter, error) @@ -34,9 +36,11 @@ type InstrumentProvider interface { } // Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. type Counter interface { - // Observe records the state of the instrument to be x. The value of x is - // assumed to be the exact Counter value to record. + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -47,9 +51,11 @@ type Counter interface { } // UpDownCounter is an instrument that records increasing or decreasing values. +// +// Warning: methods may be added to this interface in minor releases. type UpDownCounter interface { - // Observe records the state of the instrument to be x. The value of x is - // assumed to be the exact UpDownCounter value to record. + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -60,6 +66,8 @@ type UpDownCounter interface { } // Gauge is an instrument that records independent readings. +// +// Warning: methods may be added to this interface in minor releases. type Gauge interface { // Observe records the state of the instrument to be x. // diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go index 435db1127bc..2ec192f70d8 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go @@ -22,6 +22,8 @@ import ( ) // InstrumentProvider provides access to individual instruments. 
+// +// Warning: methods may be added to this interface in minor releases. type InstrumentProvider interface { // Counter creates an instrument for recording increasing values. Counter(name string, opts ...instrument.Option) (Counter, error) @@ -32,6 +34,8 @@ type InstrumentProvider interface { } // Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. type Counter interface { // Add records a change to the counter. Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) @@ -40,6 +44,8 @@ type Counter interface { } // UpDownCounter is an instrument that records increasing or decreasing values. +// +// Warning: methods may be added to this interface in minor releases. type UpDownCounter interface { // Add records a change to the counter. Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) @@ -48,6 +54,8 @@ type UpDownCounter interface { } // Histogram is an instrument that records a distribution of values. +// +// Warning: methods may be added to this interface in minor releases. type Histogram interface { // Record adds an additional value to the distribution. Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue) diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go index c77a4672860..03b5d53e636 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go @@ -22,6 +22,8 @@ import ( ) // InstrumentProvider provides access to individual instruments. +// +// Warning: methods may be added to this interface in minor releases. type InstrumentProvider interface { // Counter creates an instrument for recording increasing values. Counter(name string, opts ...instrument.Option) (Counter, error) @@ -32,6 +34,8 @@ type InstrumentProvider interface { } // Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. type Counter interface { // Add records a change to the counter. Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) @@ -40,6 +44,8 @@ type Counter interface { } // UpDownCounter is an instrument that records increasing or decreasing values. +// +// Warning: methods may be added to this interface in minor releases. type UpDownCounter interface { // Add records a change to the counter. Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) @@ -48,6 +54,8 @@ type UpDownCounter interface { } // Histogram is an instrument that records a distribution of values. +// +// Warning: methods may be added to this interface in minor releases. type Histogram interface { // Record adds an additional value to the distribution. Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue) diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 21fc1c499fb..23e6853afbb 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -26,6 +26,8 @@ import ( // MeterProvider provides access to named Meter instances, for instrumenting // an application or library. +// +// Warning: methods may be added to this interface in minor releases. type MeterProvider interface { // Meter creates an instance of a `Meter` interface. 
The instrumentationName // must be the name of the library providing instrumentation. This name may @@ -36,6 +38,8 @@ type MeterProvider interface { } // Meter provides access to instrument instances for recording metrics. +// +// Warning: methods may be added to this interface in minor releases. type Meter interface { // AsyncInt64 is the namespace for the Asynchronous Integer instruments. // diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 0c756c46cc8..01bed6c6814 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -128,6 +128,12 @@ func Contains[E comparable](s []E, v E) bool { return Index(s, v) >= 0 } +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[E any](s []E, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + // Insert inserts the values v... into s at index i, // returning the modified slice. // In the returned slice r, r[i] == v[0]. @@ -165,6 +171,7 @@ func Delete[S ~[]E, E any](s S, i, j int) S { // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice tot := len(s[:i]) + len(v) + len(s[j:]) if tot <= cap(s) { s2 := s[:tot] @@ -193,7 +200,7 @@ func Clone[S ~[]E, E any](s S) S { // This is like the uniq command found on Unix. // Compact modifies the contents of the slice s; it does not create a new slice. func Compact[S ~[]E, E comparable](s S) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 @@ -210,7 +217,7 @@ func Compact[S ~[]E, E comparable](s S) S { // CompactFunc is like Compact but uses a comparison function. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index c22e74bd102..35a5d8f06d4 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -30,7 +30,7 @@ func SortFunc[E any](x []E, less func(a, b E) bool) { pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) } -// SortStable sorts the slice x while keeping the original order of equal +// SortStableFunc sorts the slice x while keeping the original order of equal // elements, using less to compare elements. func SortStableFunc[E any](x []E, less func(a, b E) bool) { stableLessFunc(x, len(x), less) diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 75248fd16ec..65b125abd2c 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -79,6 +79,9 @@ type Error struct { Header http.Header Errors []ErrorItem + // err is typically a wrapped apierror.APIError, see + // google-api-go-client/internal/gensupport/error.go. + err error } // ErrorItem is a detailed error code & message from the Google API frontend. @@ -122,6 +125,15 @@ func (e *Error) Error() string { return buf.String() } +// Wrap allows an existing Error to wrap another error. See also [Error.Unwrap]. 
+func (e *Error) Wrap(err error) { + e.err = err +} + +func (e *Error) Unwrap() error { + return e.err +} + type errorReply struct { Error *Error `json:"error"` } diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index e450fba31cb..63e5ef64c0d 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -598,17 +598,17 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.Ca if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GenerateAccessTokenResponse{ ServerResponse: googleapi.ServerResponse{ @@ -745,17 +745,17 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GenerateIdTokenResponse{ ServerResponse: googleapi.ServerResponse{ @@ -892,17 +892,17 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SignBlobResponse{ ServerResponse: googleapi.ServerResponse{ @@ -1039,17 +1039,17 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SignJwtResponse{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/internal/gensupport/error.go b/vendor/google.golang.org/api/internal/gensupport/error.go new file mode 100644 index 00000000000..886c6532b15 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/error.go @@ -0,0 +1,24 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + + "github.com/googleapis/gax-go/v2/apierror" + "google.golang.org/api/googleapi" +) + +// WrapError creates an [apierror.APIError] from err, wraps it in err, and +// returns err. If err is not a [googleapi.Error] (or a +// [google.golang.org/grpc/status.Status]), it returns err without modification. 
+func WrapError(err error) error { + var herr *googleapi.Error + apiError, ok := apierror.ParseError(err, false) + if ok && errors.As(err, &herr) { + herr.Wrap(apiError) + } + return err +} diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 9e416e92ba1..e96a3316453 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.102.0" +const Version = "0.104.0" diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index f56a8c1d906..b2085a1949a 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -96,7 +96,9 @@ func (w withScopes) Apply(o *internal.DialSettings) { copy(o.Scopes, w) } -// WithUserAgent returns a ClientOption that sets the User-Agent. +// WithUserAgent returns a ClientOption that sets the User-Agent. This option +// is incompatible with the [WithHTTPClient] option. If you wish to provide a +// custom client you will need to add this header via RoundTripper middleware. func WithUserAgent(ua string) ClientOption { return withUA(ua) } diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 35cea853eb1..4613ebdfa72 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -2560,7 +2560,7 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -2708,17 +2708,17 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -2865,17 +2865,17 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -3025,17 +3025,17 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -3181,17 +3181,17 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke if 
res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -3347,17 +3347,17 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -3511,7 +3511,7 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -3688,17 +3688,17 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -3883,17 +3883,17 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -4107,17 +4107,17 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -4352,17 +4352,17 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Buckets{ ServerResponse: googleapi.ServerResponse{ @@ -4553,17 +4553,17 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if 
err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -4801,17 +4801,17 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -5019,17 +5019,17 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -5182,17 +5182,17 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5432,17 +5432,17 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -5631,7 +5631,7 @@ func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -5744,7 +5744,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -5892,17 +5892,17 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6050,17 +6050,17 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, 
gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6227,17 +6227,17 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -6395,17 +6395,17 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6561,17 +6561,17 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6713,7 +6713,7 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -6859,17 +6859,17 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notification{ ServerResponse: googleapi.ServerResponse{ @@ -7019,17 +7019,17 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notification{ ServerResponse: googleapi.ServerResponse{ @@ -7181,17 +7181,17 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } 
defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notifications{ ServerResponse: googleapi.ServerResponse{ @@ -7342,7 +7342,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -7516,17 +7516,17 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -7699,17 +7699,17 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -7885,17 +7885,17 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -8067,17 +8067,17 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -8259,17 +8259,17 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -8500,17 +8500,17 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return 
nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -8858,17 +8858,17 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -9165,7 +9165,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -9395,7 +9395,7 @@ func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, } if err := googleapi.CheckMediaResponse(res); err != nil { res.Body.Close() - return nil, err + return nil, gensupport.WrapError(err) } return res, nil } @@ -9414,17 +9414,17 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -9639,17 +9639,17 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -10001,17 +10001,17 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) if rx != nil { @@ -10028,7 +10028,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { } defer res.Body.Close() if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } } ret := &Object{ @@ -10352,17 +10352,17 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Objects{ ServerResponse: googleapi.ServerResponse{ @@ -10674,17 +10674,17 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) 
(*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -11081,17 +11081,17 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RewriteResponse{ ServerResponse: googleapi.ServerResponse{ @@ -11373,17 +11373,17 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -11563,17 +11563,17 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -11828,17 +11828,17 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -12135,17 +12135,17 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Channel{ ServerResponse: googleapi.ServerResponse{ @@ -12344,17 +12344,17 @@ func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKey{ 
ServerResponse: googleapi.ServerResponse{ @@ -12493,7 +12493,7 @@ func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -12640,17 +12640,17 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeyMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -12841,17 +12841,17 @@ func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeysMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -13043,17 +13043,17 @@ func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyM if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeyMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -13211,17 +13211,17 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAccount{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index c86f56507f5..efcc8e6c641 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" grpcgoogle "google.golang.org/grpc/credentials/google" + grpcinsecure "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/oauth" // Install grpclb, which is required for direct path. @@ -126,10 +127,26 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if err != nil { return nil, err } - var grpcOpts []grpc.DialOption + + var transportCreds credentials.TransportCredentials if insecure { - grpcOpts = []grpc.DialOption{grpc.WithInsecure()} - } else if !o.NoAuth { + transportCreds = grpcinsecure.NewCredentials() + } else { + transportCreds = credentials.NewTLS(&tls.Config{ + GetClientCertificate: clientCertSource, + }) + } + + // Initialize gRPC dial options with transport-level security options. 
+ grpcOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(transportCreds), + } + + // Authentication can only be sent when communicating over a secure connection. + // + // TODO: Should we be more lenient in the future and allow sending credentials + // when dialing an insecure connection? + if !o.NoAuth && !insecure { if o.APIKey != "" { log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") } @@ -142,8 +159,17 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C o.QuotaProject = internal.QuotaProjectFromCreds(creds) } + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(grpcTokenSource{ + TokenSource: oauth.TokenSource{creds.TokenSource}, + quotaProject: o.QuotaProject, + requestReason: o.RequestReason, + }), + ) + // Attempt Direct Path: if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() { + // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))} if timeoutDialerOption != nil { @@ -153,9 +179,9 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { // google-c2p resolver target must not have a port number if addr, _, err := net.SplitHostPort(endpoint); err == nil { - endpoint = "google-c2p-experimental:///" + addr + endpoint = "google-c2p:///" + addr } else { - endpoint = "google-c2p-experimental:///" + endpoint + endpoint = "google-c2p:///" + endpoint } } else { if !strings.HasPrefix(endpoint, "dns:///") { @@ -169,18 +195,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)) } // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. - } else { - tlsConfig := &tls.Config{ - GetClientCertificate: clientCertSource, - } - grpcOpts = []grpc.DialOption{ - grpc.WithPerRPCCredentials(grpcTokenSource{ - TokenSource: oauth.TokenSource{creds.TokenSource}, - quotaProject: o.QuotaProject, - requestReason: o.RequestReason, - }), - grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), - } } } diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go index 4bf9e821723..507cd3ec63a 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go +++ b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go @@ -12,7 +12,6 @@ import ( "net" "syscall" - "golang.org/x/sys/unix" "google.golang.org/grpc" ) @@ -20,6 +19,9 @@ const ( // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By // default is 20 seconds. tcpUserTimeoutMilliseconds = 20000 + + // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT. 
+ tcpUserTimeoutOp = 0x12 ) func init() { @@ -33,7 +35,7 @@ func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { var syscallErr error controlErr := c.Control(func(fd uintptr) { syscallErr = syscall.SetsockoptInt( - int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutMilliseconds) + int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds) }) if syscallErr != nil { return syscallErr diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 66fdb650f4b..ec7c602ecf9 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -15,17 +15,20 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.18.1 // source: google/api/client.proto package annotations import ( reflect "reflect" + sync "sync" + api "google.golang.org/genproto/googleapis/api" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" ) const ( @@ -35,6 +38,1047 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +type ClientLibraryOrganization int32 + +const ( + // Not useful. + ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0 + // Google Cloud Platform Org. + ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1 + // Ads (Advertising) Org. + ClientLibraryOrganization_ADS ClientLibraryOrganization = 2 + // Photos Org. + ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 + // Street View Org. + ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 +) + +// Enum value maps for ClientLibraryOrganization. +var ( + ClientLibraryOrganization_name = map[int32]string{ + 0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + 1: "CLOUD", + 2: "ADS", + 3: "PHOTOS", + 4: "STREET_VIEW", + } + ClientLibraryOrganization_value = map[string]int32{ + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, + "CLOUD": 1, + "ADS": 2, + "PHOTOS": 3, + "STREET_VIEW": 4, + } +) + +func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization { + p := new(ClientLibraryOrganization) + *p = x + return p +} + +func (x ClientLibraryOrganization) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[0].Descriptor() +} + +func (ClientLibraryOrganization) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[0] +} + +func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryOrganization.Descriptor instead. +func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// To where should client libraries be published? +type ClientLibraryDestination int32 + +const ( + // Client libraries will neither be generated nor published to package + // managers. 
+ ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0 + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + ClientLibraryDestination_GITHUB ClientLibraryDestination = 10 + // Publish the library to package managers like nuget.org and npmjs.com. + ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20 +) + +// Enum value maps for ClientLibraryDestination. +var ( + ClientLibraryDestination_name = map[int32]string{ + 0: "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + 10: "GITHUB", + 20: "PACKAGE_MANAGER", + } + ClientLibraryDestination_value = map[string]int32{ + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0, + "GITHUB": 10, + "PACKAGE_MANAGER": 20, + } +) + +func (x ClientLibraryDestination) Enum() *ClientLibraryDestination { + p := new(ClientLibraryDestination) + *p = x + return p +} + +func (x ClientLibraryDestination) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[1].Descriptor() +} + +func (ClientLibraryDestination) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[1] +} + +func (x ClientLibraryDestination) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryDestination.Descriptor instead. +func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +// Required information for every language. +type CommonLanguageSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Link to automatically generated reference documentation. Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` + // The destination where API teams want this client library to be published. + Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` +} + +func (x *CommonLanguageSettings) Reset() { + *x = CommonLanguageSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonLanguageSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonLanguageSettings) ProtoMessage() {} + +func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead. 
+func (*CommonLanguageSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +func (x *CommonLanguageSettings) GetReferenceDocsUri() string { + if x != nil { + return x.ReferenceDocsUri + } + return "" +} + +func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { + if x != nil { + return x.Destinations + } + return nil +} + +// Details about how and where to publish client libraries. +type ClientLibrarySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of the API to apply these settings to. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Launch stage of this version of the API. + LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"` + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"` + // Settings for C++ client libraries. + CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"` + // Settings for PHP client libraries. + PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"` + // Settings for Python client libraries. + PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"` + // Settings for Node client libraries. + NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"` + // Settings for .NET client libraries. + DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"` + // Settings for Ruby client libraries. + RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"` + // Settings for Go client libraries. + GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"` +} + +func (x *ClientLibrarySettings) Reset() { + *x = ClientLibrarySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientLibrarySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientLibrarySettings) ProtoMessage() {} + +func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead. 
+func (*ClientLibrarySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientLibrarySettings) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *ClientLibrarySettings) GetRestNumericEnums() bool { + if x != nil { + return x.RestNumericEnums + } + return false +} + +func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings { + if x != nil { + return x.JavaSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetCppSettings() *CppSettings { + if x != nil { + return x.CppSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings { + if x != nil { + return x.PhpSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings { + if x != nil { + return x.PythonSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings { + if x != nil { + return x.NodeSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings { + if x != nil { + return x.DotnetSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetRubySettings() *RubySettings { + if x != nil { + return x.RubySettings + } + return nil +} + +func (x *ClientLibrarySettings) GetGoSettings() *GoSettings { + if x != nil { + return x.GoSettings + } + return nil +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +type Publishing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` + // Link to a place that API users can report issues. Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` + // Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"` + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"` + // GitHub label to apply to issues and pull requests opened for this API. + GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"` + // GitHub teams to be added to CODEOWNERS in the directory in GitHub + // containing source code for the client libraries for this API. + CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"` + // A prefix used in sample code when demarking regions to be included in + // documentation. 
+ DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"` + // For whom the client library is being published. + Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"` + // Client library settings. If the same version string appears multiple + // times in this list, then the last one wins. Settings from earlier + // settings with the same version string are discarded. + LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"` +} + +func (x *Publishing) Reset() { + *x = Publishing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Publishing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Publishing) ProtoMessage() {} + +func (x *Publishing) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Publishing.ProtoReflect.Descriptor instead. +func (*Publishing) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{2} +} + +func (x *Publishing) GetMethodSettings() []*MethodSettings { + if x != nil { + return x.MethodSettings + } + return nil +} + +func (x *Publishing) GetNewIssueUri() string { + if x != nil { + return x.NewIssueUri + } + return "" +} + +func (x *Publishing) GetDocumentationUri() string { + if x != nil { + return x.DocumentationUri + } + return "" +} + +func (x *Publishing) GetApiShortName() string { + if x != nil { + return x.ApiShortName + } + return "" +} + +func (x *Publishing) GetGithubLabel() string { + if x != nil { + return x.GithubLabel + } + return "" +} + +func (x *Publishing) GetCodeownerGithubTeams() []string { + if x != nil { + return x.CodeownerGithubTeams + } + return nil +} + +func (x *Publishing) GetDocTagPrefix() string { + if x != nil { + return x.DocTagPrefix + } + return "" +} + +func (x *Publishing) GetOrganization() ClientLibraryOrganization { + if x != nil { + return x.Organization + } + return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED +} + +func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings { + if x != nil { + return x.LibrarySettings + } + return nil +} + +// Settings for Java client libraries. +type JavaSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The package name to use in Java. Clobbers the java_package option + // set in the protobuf. This should be used **only** by APIs + // who have already set the language_settings.java.package_name" field + // in gapic.yaml. API teams should use the protobuf java_package option + // where possible. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // library_package: com.google.cloud.pubsub.v1 + LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` + // Configure the Java class name to use instead of the service's for its + // corresponding generated GAPIC client. 
Keys are fully-qualified + // service names as they appear in the protobuf (including the full + // the language_settings.java.interface_names" field in gapic.yaml. API + // teams should otherwise use the service name as it appears in the + // protobuf. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // service_class_names: + // - google.pubsub.v1.Publisher: TopicAdmin + // - google.pubsub.v1.Subscriber: SubscriptionAdmin + ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *JavaSettings) Reset() { + *x = JavaSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JavaSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JavaSettings) ProtoMessage() {} + +func (x *JavaSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead. +func (*JavaSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{3} +} + +func (x *JavaSettings) GetLibraryPackage() string { + if x != nil { + return x.LibraryPackage + } + return "" +} + +func (x *JavaSettings) GetServiceClassNames() map[string]string { + if x != nil { + return x.ServiceClassNames + } + return nil +} + +func (x *JavaSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for C++ client libraries. +type CppSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *CppSettings) Reset() { + *x = CppSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CppSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CppSettings) ProtoMessage() {} + +func (x *CppSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead. +func (*CppSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{4} +} + +func (x *CppSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Php client libraries. +type PhpSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PhpSettings) Reset() { + *x = PhpSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhpSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhpSettings) ProtoMessage() {} + +func (x *PhpSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead. +func (*PhpSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{5} +} + +func (x *PhpSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Python client libraries. +type PythonSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PythonSettings) Reset() { + *x = PythonSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings) ProtoMessage() {} + +func (x *PythonSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead. +func (*PythonSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6} +} + +func (x *PythonSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Node client libraries. +type NodeSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *NodeSettings) Reset() { + *x = NodeSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeSettings) ProtoMessage() {} + +func (x *NodeSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead. 
+func (*NodeSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{7} +} + +func (x *NodeSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Dotnet client libraries. +type DotnetSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *DotnetSettings) Reset() { + *x = DotnetSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DotnetSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DotnetSettings) ProtoMessage() {} + +func (x *DotnetSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead. +func (*DotnetSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{8} +} + +func (x *DotnetSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Ruby client libraries. +type RubySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *RubySettings) Reset() { + *x = RubySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RubySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RubySettings) ProtoMessage() {} + +func (x *RubySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead. +func (*RubySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{9} +} + +func (x *RubySettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Go client libraries. +type GoSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *GoSettings) Reset() { + *x = GoSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoSettings) ProtoMessage() {} + +func (x *GoSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead. +func (*GoSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{10} +} + +func (x *GoSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Describes the generator configuration for a method. +type MethodSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the method, for which the options below apply. + // This is used to find the method to apply the options. + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_behavior: + // - selector: CreateAdDomain + // long_running: + // initial_poll_delay: + // seconds: 60 # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: + // seconds: 360 # 6 minutes + // total_poll_timeout: + // seconds: 54000 # 90 minutes + LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` +} + +func (x *MethodSettings) Reset() { + *x = MethodSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings) ProtoMessage() {} + +func (x *MethodSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead. +func (*MethodSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11} +} + +func (x *MethodSettings) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { + if x != nil { + return x.LongRunning + } + return nil +} + +// Describes settings to use when generating API methods that use the +// long-running operation pattern. +// All default values below are from those used in the client library +// generators (e.g. 
+// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). +type MethodSettings_LongRunning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Initial delay after which the first poll request will be made. + // Default value: 5 seconds. + InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"` + // Multiplier to gradually increase delay between subsequent polls until it + // reaches max_poll_delay. + // Default value: 1.5. + PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"` + // Maximum time between two subsequent poll requests. + // Default value: 45 seconds. + MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"` + // Total polling timeout. + // Default value: 5 minutes. + TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"` +} + +func (x *MethodSettings_LongRunning) Reset() { + *x = MethodSettings_LongRunning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings_LongRunning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings_LongRunning) ProtoMessage() {} + +func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead. 
+func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration { + if x != nil { + return x.InitialPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 { + if x != nil { + return x.PollDelayMultiplier + } + return 0 +} + +func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration { + if x != nil { + return x.MaxPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration { + if x != nil { + return x.TotalPollTimeout + } + return nil +} + var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.MethodOptions)(nil), @@ -78,26 +1122,26 @@ var ( // // For example, the proto RPC and annotation: // - // rpc CreateSubscription(CreateSubscriptionRequest) - // returns (Subscription) { - // option (google.api.method_signature) = "name,topic"; - // } + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } // // Would add the following Java overload (in addition to the method accepting // the request object): // - // public final Subscription createSubscription(String name, String topic) + // public final Subscription createSubscription(String name, String topic) // // The following backwards-compatibility guidelines apply: // - // * Adding this annotation to an unannotated method is backwards + // - Adding this annotation to an unannotated method is backwards // compatible. - // * Adding this annotation to a method which already has existing + // - Adding this annotation to a method which already has existing // method signature annotations is backwards compatible if and only if // the new method signature annotation is last in the sequence. - // * Modifying or removing an existing method signature annotation is + // - Modifying or removing an existing method signature annotation is // a breaking change. - // * Re-ordering existing method signature annotations is a breaking + // - Re-ordering existing method signature annotations is a breaking // change. // // repeated string method_signature = 1051; @@ -111,10 +1155,10 @@ var ( // // Example: // - // service Foo { - // option (google.api.default_host) = "foo.googleapi.com"; - // ... - // } + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } // // optional string default_host = 1049; E_DefaultHost = &file_google_api_client_proto_extTypes[1] @@ -122,22 +1166,22 @@ var ( // // Example: // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform"; - // ... - // } + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } // // If there is more than one scope, use a comma-separated string: // // Example: // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform," - // "https://www.googleapis.com/auth/monitoring"; - // ... - // } + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... 
+ // } // // optional string oauth_scopes = 1050; E_OauthScopes = &file_google_api_client_proto_extTypes[2] @@ -148,44 +1192,278 @@ var File_google_api_client_proto protoreflect.FileDescriptor var file_google_api_client_proto_rawDesc = []byte{ 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, - 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, - 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, - 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, - 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 
0x65, 0x5f, 0x64, + 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73, 0x55, 0x72, 0x69, 0x12, + 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, + 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, + 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, + 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, + 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, + 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 
0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, + 0xe0, 0x03, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, + 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, + 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, + 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, + 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, + 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, + 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, + 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, + 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 
0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, + 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, + 0x4c, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, + 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, + 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, 0x94, 0x02, + 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, + 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, + 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, + 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 
0x6c, 0x61, 0x79, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, + 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, + 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x2a, 0x79, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, + 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, + 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, + 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x2a, + 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, + 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, + 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, + 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, + 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, + 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, + 0x09, 
0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_api_client_proto_rawDescOnce sync.Once + file_google_api_client_proto_rawDescData = file_google_api_client_proto_rawDesc +) + +func file_google_api_client_proto_rawDescGZIP() []byte { + file_google_api_client_proto_rawDescOnce.Do(func() { + file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData) + }) + return file_google_api_client_proto_rawDescData } +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_google_api_client_proto_goTypes = []interface{}{ - (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*MethodSettings_LongRunning)(nil), // 15: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 16: google.api.LaunchStage + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 18: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 19: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ - 0, // 0: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 1, // 1: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 1, // 2: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 0, // [0:3] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination + 16, // 1: 
google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 15, // 22: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 17, // 23: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 17, // 24: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 17, // 25: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 18, // 26: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 19, // 27: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 19, // 28: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 26, // [26:29] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -193,18 +1471,178 @@ func file_google_api_client_proto_init() { if File_google_api_client_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonLanguageSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientLibrarySettings); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Publishing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JavaSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CppSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhpSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DotnetSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RubySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings_LongRunning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, + NumEnums: 2, + NumMessages: 14, NumExtensions: 3, NumServices: 0, }, GoTypes: file_google_api_client_proto_goTypes, DependencyIndexes: file_google_api_client_proto_depIdxs, + EnumInfos: file_google_api_client_proto_enumTypes, + MessageInfos: file_google_api_client_proto_msgTypes, ExtensionInfos: file_google_api_client_proto_extTypes, }.Build() 
File_google_api_client_proto = out.File diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go new file mode 100644 index 00000000000..71075313773 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -0,0 +1,203 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.18.1 +// source: google/api/launch_stage.proto + +package api + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](https://cloud.google.com/terms/launch-stages). +type LaunchStage int32 + +const ( + // Do not use this default value. + LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 + // The feature is not yet implemented. Users can not use it. + LaunchStage_UNIMPLEMENTED LaunchStage = 6 + // Prelaunch features are hidden from users and are only visible internally. + LaunchStage_PRELAUNCH LaunchStage = 7 + // Early Access features are limited to a closed group of testers. To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. + LaunchStage_EARLY_ACCESS LaunchStage = 1 + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + LaunchStage_ALPHA LaunchStage = 2 + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. 
+ LaunchStage_BETA LaunchStage = 3 + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + LaunchStage_GA LaunchStage = 4 + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + LaunchStage_DEPRECATED LaunchStage = 5 +) + +// Enum value maps for LaunchStage. +var ( + LaunchStage_name = map[int32]string{ + 0: "LAUNCH_STAGE_UNSPECIFIED", + 6: "UNIMPLEMENTED", + 7: "PRELAUNCH", + 1: "EARLY_ACCESS", + 2: "ALPHA", + 3: "BETA", + 4: "GA", + 5: "DEPRECATED", + } + LaunchStage_value = map[string]int32{ + "LAUNCH_STAGE_UNSPECIFIED": 0, + "UNIMPLEMENTED": 6, + "PRELAUNCH": 7, + "EARLY_ACCESS": 1, + "ALPHA": 2, + "BETA": 3, + "GA": 4, + "DEPRECATED": 5, + } +) + +func (x LaunchStage) Enum() *LaunchStage { + p := new(LaunchStage) + *p = x + return p +} + +func (x LaunchStage) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LaunchStage) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_launch_stage_proto_enumTypes[0].Descriptor() +} + +func (LaunchStage) Type() protoreflect.EnumType { + return &file_google_api_launch_stage_proto_enumTypes[0] +} + +func (x LaunchStage) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LaunchStage.Descriptor instead. +func (LaunchStage) EnumDescriptor() ([]byte, []int) { + return file_google_api_launch_stage_proto_rawDescGZIP(), []int{0} +} + +var File_google_api_launch_stage_proto protoreflect.FileDescriptor + +var file_google_api_launch_stage_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, + 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, + 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, + 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, + 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, + 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, + 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, + 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, + 0x02, 0x04, 
0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_launch_stage_proto_rawDescOnce sync.Once + file_google_api_launch_stage_proto_rawDescData = file_google_api_launch_stage_proto_rawDesc +) + +func file_google_api_launch_stage_proto_rawDescGZIP() []byte { + file_google_api_launch_stage_proto_rawDescOnce.Do(func() { + file_google_api_launch_stage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_launch_stage_proto_rawDescData) + }) + return file_google_api_launch_stage_proto_rawDescData +} + +var file_google_api_launch_stage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_launch_stage_proto_goTypes = []interface{}{ + (LaunchStage)(0), // 0: google.api.LaunchStage +} +var file_google_api_launch_stage_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_launch_stage_proto_init() } +func file_google_api_launch_stage_proto_init() { + if File_google_api_launch_stage_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_launch_stage_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_launch_stage_proto_goTypes, + DependencyIndexes: file_google_api_launch_stage_proto_depIdxs, + EnumInfos: file_google_api_launch_stage_proto_enumTypes, + }.Build() + File_google_api_launch_stage_proto = out.File + file_google_api_launch_stage_proto_rawDesc = nil + file_google_api_launch_stage_proto_goTypes = nil + file_google_api_launch_stage_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go new file mode 100644 index 00000000000..9fb745926a5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go @@ -0,0 +1,208 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package iam aliases all exported identifiers in package +// "cloud.google.com/go/iam/apiv1/iampb". +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb. +// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md +// for more details. 
+package iam + +import ( + src "cloud.google.com/go/iam/apiv1/iampb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use consts in: cloud.google.com/go/iam/apiv1/iampb +const ( + AuditConfigDelta_ACTION_UNSPECIFIED = src.AuditConfigDelta_ACTION_UNSPECIFIED + AuditConfigDelta_ADD = src.AuditConfigDelta_ADD + AuditConfigDelta_REMOVE = src.AuditConfigDelta_REMOVE + AuditLogConfig_ADMIN_READ = src.AuditLogConfig_ADMIN_READ + AuditLogConfig_DATA_READ = src.AuditLogConfig_DATA_READ + AuditLogConfig_DATA_WRITE = src.AuditLogConfig_DATA_WRITE + AuditLogConfig_LOG_TYPE_UNSPECIFIED = src.AuditLogConfig_LOG_TYPE_UNSPECIFIED + BindingDelta_ACTION_UNSPECIFIED = src.BindingDelta_ACTION_UNSPECIFIED + BindingDelta_ADD = src.BindingDelta_ADD + BindingDelta_REMOVE = src.BindingDelta_REMOVE +) + +// Deprecated: Please use vars in: cloud.google.com/go/iam/apiv1/iampb +var ( + AuditConfigDelta_Action_name = src.AuditConfigDelta_Action_name + AuditConfigDelta_Action_value = src.AuditConfigDelta_Action_value + AuditLogConfig_LogType_name = src.AuditLogConfig_LogType_name + AuditLogConfig_LogType_value = src.AuditLogConfig_LogType_value + BindingDelta_Action_name = src.BindingDelta_Action_name + BindingDelta_Action_value = src.BindingDelta_Action_value + File_google_iam_v1_iam_policy_proto = src.File_google_iam_v1_iam_policy_proto + File_google_iam_v1_options_proto = src.File_google_iam_v1_options_proto + File_google_iam_v1_policy_proto = src.File_google_iam_v1_policy_proto +) + +// Specifies the audit configuration for a service. The configuration +// determines which permission types are logged, and what identities, if any, +// are exempted from logging. An AuditConfig must have one or more +// AuditLogConfigs. If there are AuditConfigs for both `allServices` and a +// specific service, the union of the two AuditConfigs is used for that +// service: the log_types specified in each AuditConfig are enabled, and the +// exempted_members in each AuditLogConfig are exempted. Example Policy with +// multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": +// "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", +// "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": +// "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For +// sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ +// logging. It also exempts jose@example.com from DATA_READ logging, and +// aliya@example.com from DATA_WRITE logging. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfig = src.AuditConfig + +// One delta entry for AuditConfig. Each individual change (only one +// exempted_member in each entry) to a AuditConfig will be a separate entry. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfigDelta = src.AuditConfigDelta + +// The type of action performed on an audit configuration in a policy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfigDelta_Action = src.AuditConfigDelta_Action + +// Provides the configuration for logging a type of permissions. 
Example: { +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables +// 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from +// DATA_READ logging. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditLogConfig = src.AuditLogConfig + +// The list of valid permission types for which logging can be configured. +// Admin writes are always logged, and are not configurable. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditLogConfig_LogType = src.AuditLogConfig_LogType + +// Associates `members`, or principals, with a `role`. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type Binding = src.Binding + +// One delta entry for Binding. Each individual change (only one member in +// each entry) to a binding will be a separate entry. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type BindingDelta = src.BindingDelta + +// The type of action performed on a Binding in a policy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type BindingDelta_Action = src.BindingDelta_Action + +// Request message for `GetIamPolicy` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type GetIamPolicyRequest = src.GetIamPolicyRequest + +// Encapsulates settings provided to GetIamPolicy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type GetPolicyOptions = src.GetPolicyOptions + +// IAMPolicyClient is the client API for IAMPolicy service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type IAMPolicyClient = src.IAMPolicyClient + +// IAMPolicyServer is the server API for IAMPolicy service. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type IAMPolicyServer = src.IAMPolicyServer + +// An Identity and Access Management (IAM) policy, which specifies access +// controls for Google Cloud resources. A `Policy` is a collection of +// `bindings`. A `binding` binds one or more `members`, or principals, to a +// single `role`. Principals can be user accounts, service accounts, Google +// groups, and domains (such as G Suite). A `role` is a named list of +// permissions; each `role` can be an IAM predefined role or a user-created +// custom role. For some types of Google Cloud resources, a `binding` can also +// specify a `condition`, which is a logical expression that allows access to a +// resource only if the expression evaluates to `true`. A condition can add +// constraints based on attributes of the request, the resource, or both. To +// learn which resources support conditions in their IAM policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
+// **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": +// "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": +// "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - +// user:mike@example.com - group:admins@example.com - domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - user:eve@example.com +// role: roles/resourcemanager.organizationViewer condition: title: expirable +// access description: Does not grant access after Sep 2020 expression: +// request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= +// version: 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type Policy = src.Policy + +// The difference delta between two policies. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type PolicyDelta = src.PolicyDelta + +// Request message for `SetIamPolicy` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type SetIamPolicyRequest = src.SetIamPolicyRequest + +// Request message for `TestIamPermissions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type TestIamPermissionsRequest = src.TestIamPermissionsRequest + +// Response message for `TestIamPermissions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type TestIamPermissionsResponse = src.TestIamPermissionsResponse + +// UnimplementedIAMPolicyServer can be embedded to have forward compatible +// implementations. 
+// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type UnimplementedIAMPolicyServer = src.UnimplementedIAMPolicyServer + +// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb +func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { + return src.NewIAMPolicyClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb +func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { + src.RegisterIAMPolicyServer(s, srv) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 89b0234db29..6285a8e864a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,15 +4,16 @@ cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/compute v1.12.1 +# cloud.google.com/go/compute v1.13.0 ## explicit; go 1.19 cloud.google.com/go/compute/internal -# cloud.google.com/go/compute/metadata v0.2.1 +# cloud.google.com/go/compute/metadata v0.2.2 ## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v0.6.0 +# cloud.google.com/go/iam v0.8.0 ## explicit; go 1.19 cloud.google.com/go/iam +cloud.google.com/go/iam/apiv1/iampb # cloud.google.com/go/storage v1.27.0 ## explicit; go 1.17 cloud.google.com/go/storage @@ -415,7 +416,7 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e +# github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 ## explicit; go 1.18 github.com/google/pprof/profile # github.com/google/uuid v1.3.0 @@ -425,7 +426,7 @@ github.com/google/uuid ## explicit; go 1.18 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.6.0 +# github.com/googleapis/gax-go/v2 v2.7.0 ## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror @@ -434,7 +435,7 @@ github.com/googleapis/gax-go/v2/internal # github.com/gorilla/mux v1.8.0 ## explicit; go 1.12 github.com/gorilla/mux -# github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 +# github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd ## explicit; go 1.17 github.com/grafana/regexp github.com/grafana/regexp/syntax @@ -677,7 +678,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint # github.com/prometheus/client_model v0.3.0 ## explicit; go 1.9 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.39.0 +# github.com/prometheus/common v0.39.1-0.20230110141620-846591a16635 ## explicit; go 1.17 github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -696,7 +697,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.40.7 +# github.com/prometheus/prometheus v0.41.0 ## explicit; go 1.18 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -795,7 +796,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock 
github.com/stretchr/testify/require -# github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4 +# github.com/thanos-community/promql-engine v0.0.0-20230124070417-9e293186b7e4 ## explicit; go 1.19 github.com/thanos-community/promql-engine/api github.com/thanos-community/promql-engine/engine @@ -824,7 +825,7 @@ github.com/thanos-io/objstore/providers/gcs github.com/thanos-io/objstore/providers/s3 github.com/thanos-io/objstore/providers/swift github.com/thanos-io/objstore/tracing -# github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a +# github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00 ## explicit; go 1.18 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader @@ -952,7 +953,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore -# go.opencensus.io v0.23.0 +# go.opencensus.io v0.24.0 ## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal @@ -971,7 +972,7 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 ## explicit; go 1.18 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp # go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 @@ -1019,7 +1020,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform # go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 ## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc -# go.opentelemetry.io/otel/metric v0.33.0 +# go.opentelemetry.io/otel/metric v0.34.0 ## explicit; go 1.18 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/global @@ -1074,7 +1075,7 @@ golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 -# golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 +# golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 ## explicit; go 1.18 golang.org/x/exp/constraints golang.org/x/exp/slices @@ -1159,7 +1160,7 @@ golang.org/x/xerrors/internal gonum.org/v1/gonum/floats gonum.org/v1/gonum/floats/scalar gonum.org/v1/gonum/internal/asm/f64 -# google.golang.org/api v0.102.0 +# google.golang.org/api v0.104.0 ## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1192,8 +1193,9 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c +# google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 ## explicit; go 1.19 +google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/iam/v1 From 38aff2c69bcdf5f9bd423b3614f8ab50ca7147c3 Mon Sep 17 00:00:00 2001 From: tanghengjian <1040104807@qq.com> Date: Tue, 1 Nov 2022 16:57:24 +0800 Subject: [PATCH 2/2] Push reduce one hash operation of Labels. 
Signed-off-by: tanghengjian <1040104807@qq.com> --- CHANGELOG.md | 1 + pkg/ingester/active_series.go | 31 +++------------------- pkg/ingester/active_series_test.go | 41 ++++++++++++++++++------------ pkg/ingester/ingester.go | 6 +++-- 4 files changed, 33 insertions(+), 46 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e60b9c67768..98f44cd4512 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ * [ENHANCEMENT] Query Frontend: Log Vertical sharding information when `query_stats_enabled` is enabled. #5037 * [ENHANCEMENT] Ingester: The metadata APIs should honour `querier.query-ingesters-within` when `querier.query-store-for-labels-enabled` is true. #5027 * [ENHANCEMENT] Query Frontend: Skip instant query roundtripper if sharding is not applicable. #5062 +* [ENHANCEMENT] Push reduce one hash operation of Labels. #4945 #5114 * [FEATURE] Querier/Query Frontend: support Prometheus /api/v1/status/buildinfo API. #4978 * [FEATURE] Ingester: Add active series to all_user_stats page. #4972 * [FEATURE] Ingester: Added `-blocks-storage.tsdb.head-chunks-write-queue-size` allowing to configure the size of the in-memory queue used before flushing chunks to the disk . #5000 diff --git a/pkg/ingester/active_series.go b/pkg/ingester/active_series.go index ba3e2760d37..5285f279639 100644 --- a/pkg/ingester/active_series.go +++ b/pkg/ingester/active_series.go @@ -1,17 +1,12 @@ package ingester import ( - "hash" "math" "sync" "time" - "github.com/cespare/xxhash" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/util" ) const ( @@ -53,30 +48,10 @@ func NewActiveSeries() *ActiveSeries { } // Updates series timestamp to 'now'. Function is called to make a copy of labels if entry doesn't exist yet. -func (c *ActiveSeries) UpdateSeries(series labels.Labels, now time.Time, labelsCopy func(labels.Labels) labels.Labels) { - fp := fingerprint(series) - stripeID := fp % numActiveSeriesStripes - - c.stripes[stripeID].updateSeriesTimestamp(now, series, fp, labelsCopy) -} - -var sep = []byte{model.SeparatorByte} - -var hashPool = sync.Pool{New: func() interface{} { return xxhash.New() }} - -func fingerprint(series labels.Labels) uint64 { - sum := hashPool.Get().(hash.Hash64) - defer hashPool.Put(sum) - - sum.Reset() - for _, label := range series { - _, _ = sum.Write(util.YoloBuf(label.Name)) - _, _ = sum.Write(sep) - _, _ = sum.Write(util.YoloBuf(label.Value)) - _, _ = sum.Write(sep) - } +func (c *ActiveSeries) UpdateSeries(series labels.Labels, hash uint64, now time.Time, labelsCopy func(labels.Labels) labels.Labels) { + stripeID := hash % numActiveSeriesStripes - return sum.Sum64() + c.stripes[stripeID].updateSeriesTimestamp(now, series, hash, labelsCopy) } // Purge removes expired entries from the cache. 
This function should be called diff --git a/pkg/ingester/active_series_test.go b/pkg/ingester/active_series_test.go index ab3c63db15d..02a5477194c 100644 --- a/pkg/ingester/active_series_test.go +++ b/pkg/ingester/active_series_test.go @@ -8,6 +8,7 @@ import ( "sync" "testing" "time" + "unsafe" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/assert" @@ -15,20 +16,25 @@ import ( func copyFn(l labels.Labels) labels.Labels { return l } +func fromLabelToLabels(ls []labels.Label) labels.Labels { + return *(*labels.Labels)(unsafe.Pointer(&ls)) +} + func TestActiveSeries_UpdateSeries(t *testing.T) { ls1 := []labels.Label{{Name: "a", Value: "1"}} ls2 := []labels.Label{{Name: "a", Value: "2"}} c := NewActiveSeries() assert.Equal(t, 0, c.Active()) - - c.UpdateSeries(ls1, time.Now(), copyFn) + labels1Hash := fromLabelToLabels(ls1).Hash() + labels2Hash := fromLabelToLabels(ls2).Hash() + c.UpdateSeries(ls1, labels1Hash, time.Now(), copyFn) assert.Equal(t, 1, c.Active()) - c.UpdateSeries(ls1, time.Now(), copyFn) + c.UpdateSeries(ls1, labels1Hash, time.Now(), copyFn) assert.Equal(t, 1, c.Active()) - c.UpdateSeries(ls2, time.Now(), copyFn) + c.UpdateSeries(ls2, labels2Hash, time.Now(), copyFn) assert.Equal(t, 2, c.Active()) } @@ -46,7 +52,7 @@ func TestActiveSeries_Purge(t *testing.T) { c := NewActiveSeries() for i := 0; i < len(series); i++ { - c.UpdateSeries(series[i], time.Unix(int64(i), 0), copyFn) + c.UpdateSeries(series[i], fromLabelToLabels(series[i]).Hash(), time.Unix(int64(i), 0), copyFn) } c.Purge(time.Unix(int64(ttl+1), 0)) @@ -62,24 +68,23 @@ func TestActiveSeries_PurgeOpt(t *testing.T) { metric := labels.NewBuilder(labels.FromStrings("__name__", "logs")) ls1 := metric.Set("_", "ypfajYg2lsv").Labels(nil) ls2 := metric.Set("_", "KiqbryhzUpn").Labels(nil) - c := NewActiveSeries() now := time.Now() - c.UpdateSeries(ls1, now.Add(-2*time.Minute), copyFn) - c.UpdateSeries(ls2, now, copyFn) + c.UpdateSeries(ls1, ls1.Hash(), now.Add(-2*time.Minute), copyFn) + c.UpdateSeries(ls2, ls2.Hash(), now, copyFn) c.Purge(now) assert.Equal(t, 1, c.Active()) - c.UpdateSeries(ls1, now.Add(-1*time.Minute), copyFn) - c.UpdateSeries(ls2, now, copyFn) + c.UpdateSeries(ls1, ls1.Hash(), now.Add(-1*time.Minute), copyFn) + c.UpdateSeries(ls2, ls2.Hash(), now, copyFn) c.Purge(now) assert.Equal(t, 1, c.Active()) // This will *not* update the series, since there is already newer timestamp. 
- c.UpdateSeries(ls2, now.Add(-1*time.Minute), copyFn) + c.UpdateSeries(ls2, ls2.Hash(), now.Add(-1*time.Minute), copyFn) c.Purge(now) assert.Equal(t, 1, c.Active()) @@ -105,7 +110,7 @@ func benchmarkActiveSeriesConcurrencySingleSeries(b *testing.B, goroutines int) wg := &sync.WaitGroup{} start := make(chan struct{}) max := int(math.Ceil(float64(b.N) / float64(goroutines))) - + labelhash := series.Hash() for i := 0; i < goroutines; i++ { wg.Add(1) go func() { @@ -116,7 +121,7 @@ func benchmarkActiveSeriesConcurrencySingleSeries(b *testing.B, goroutines int) for ix := 0; ix < max; ix++ { now = now.Add(time.Duration(ix) * time.Millisecond) - c.UpdateSeries(series, now, copyFn) + c.UpdateSeries(series, labelhash, now, copyFn) } }() } @@ -137,15 +142,17 @@ func BenchmarkActiveSeries_UpdateSeries(b *testing.B) { name := nameBuf.String() series := make([]labels.Labels, b.N) + labelhash := make([]uint64, b.N) for s := 0; s < b.N; s++ { series[s] = labels.Labels{{Name: name, Value: name + strconv.Itoa(s)}} + labelhash[s] = series[s].Hash() } now := time.Now().UnixNano() b.ResetTimer() for ix := 0; ix < b.N; ix++ { - c.UpdateSeries(series[ix], time.Unix(0, now+int64(ix)), copyFn) + c.UpdateSeries(series[ix], labelhash[ix], time.Unix(0, now+int64(ix)), copyFn) } } @@ -165,8 +172,10 @@ func benchmarkPurge(b *testing.B, twice bool) { c := NewActiveSeries() series := [numSeries]labels.Labels{} + labelhash := [numSeries]uint64{} for s := 0; s < numSeries; s++ { series[s] = labels.Labels{{Name: "a", Value: strconv.Itoa(s)}} + labelhash[s] = series[s].Hash() } for i := 0; i < b.N; i++ { @@ -175,9 +184,9 @@ func benchmarkPurge(b *testing.B, twice bool) { // Prepare series for ix, s := range series { if ix < numExpiresSeries { - c.UpdateSeries(s, now.Add(-time.Minute), copyFn) + c.UpdateSeries(s, labelhash[ix], now.Add(-time.Minute), copyFn) } else { - c.UpdateSeries(s, now, copyFn) + c.UpdateSeries(s, labelhash[ix], now, copyFn) } } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 7d417251fcc..7a145edfad0 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -967,7 +967,9 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte // has sorted labels once hit the ingester). // Look up a reference for this series. - ref, copiedLabels := app.GetRef(cortexpb.FromLabelAdaptersToLabels(ts.Labels)) + tsLabels := cortexpb.FromLabelAdaptersToLabels(ts.Labels) + tsLabelsHash := tsLabels.Hash() + ref, copiedLabels := app.GetRef(tsLabels, tsLabelsHash) // To find out if any sample was added to this series, we keep old value. oldSucceededSamplesCount := succeededSamplesCount @@ -1037,7 +1039,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte } if i.cfg.ActiveSeriesMetricsEnabled && succeededSamplesCount > oldSucceededSamplesCount { - db.activeSeries.UpdateSeries(cortexpb.FromLabelAdaptersToLabels(ts.Labels), startAppend, func(l labels.Labels) labels.Labels { + db.activeSeries.UpdateSeries(tsLabels, tsLabelsHash, startAppend, func(l labels.Labels) labels.Labels { // we must already have copied the labels if succeededSamplesCount has been incremented. return copiedLabels })
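Note on the second patch: Push now computes the label hash once (tsLabelsHash := tsLabels.Hash()) and hands that value to both app.GetRef and ActiveSeries.UpdateSeries, which shard work by hash, instead of each callee rehashing the label set. The sketch below is a minimal, dependency-free illustration of that "hash once, reuse everywhere" pattern; the identifiers (activeSeries, updateSeries, hashLabels) and the FNV hash are stand-ins for this example only, not the actual Cortex or Prometheus implementations.

```go
// Hypothetical, simplified sketch of hashing a label set once per push and
// reusing the value for stripe selection, as the patch does for UpdateSeries.
package main

import (
	"fmt"
	"hash/fnv"
	"sync"
	"time"
)

const numStripes = 8

// label is a minimal stand-in for a Prometheus label pair.
type label struct{ name, value string }

// hashLabels computes a single hash for a sorted label set. In the real code
// this role is played by labels.Labels.Hash(); FNV-1a is used here only to
// keep the sketch free of external dependencies.
func hashLabels(ls []label) uint64 {
	h := fnv.New64a()
	for _, l := range ls {
		h.Write([]byte(l.name))
		h.Write([]byte{0})
		h.Write([]byte(l.value))
		h.Write([]byte{0})
	}
	return h.Sum64()
}

// activeSeries tracks last-seen timestamps, sharded into stripes by hash.
// Entries are keyed by the hash alone; collisions are ignored for brevity,
// whereas the real code keeps a copy of the labels per entry.
type activeSeries struct {
	stripes [numStripes]struct {
		mu   sync.Mutex
		last map[uint64]time.Time
	}
}

func newActiveSeries() *activeSeries {
	c := &activeSeries{}
	for i := range c.stripes {
		c.stripes[i].last = map[uint64]time.Time{}
	}
	return c
}

// updateSeries accepts the precomputed hash instead of rehashing the labels,
// mirroring the new UpdateSeries(series, hash, now, copyFn) signature.
func (c *activeSeries) updateSeries(hash uint64, now time.Time) {
	s := &c.stripes[hash%numStripes]
	s.mu.Lock()
	defer s.mu.Unlock()
	if prev, ok := s.last[hash]; !ok || now.After(prev) {
		s.last[hash] = now
	}
}

func main() {
	c := newActiveSeries()
	series := []label{{"__name__", "http_requests_total"}, {"job", "api"}}

	// Hash once per push...
	h := hashLabels(series)

	// ...then reuse the same value for every consumer that needs it
	// (ref lookup, active-series tracking), rather than hashing again.
	c.updateSeries(h, time.Now())
	fmt.Printf("series hash %d recorded\n", h)
}
```

In the actual change, the precomputed hash also replaces the xxhash-based fingerprint() helper that UpdateSeries previously computed internally, which is where the saved hash operation per push comes from.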