From d4a652668b905ec7e2df8c9b2ddeee66857733bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Mon, 19 Oct 2020 12:41:23 +0200 Subject: [PATCH 1/8] Created new version of frontend package, with separate scheduler component. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- CHANGELOG.md | 1 + .../tsdb-blocks-storage-s3/config/cortex.yaml | 4 + .../config/grafana-agent.yaml | 10 +- .../config/prometheus.yaml | 10 +- .../tsdb-blocks-storage-s3/docker-compose.yml | 65 + docs/configuration/config-file-reference.md | 127 +- docs/configuration/v1-guarantees.md | 1 + docs/guides/shuffle-sharding.md | 2 +- pkg/api/api.go | 17 +- pkg/cortex/cortex.go | 71 +- pkg/cortex/modules.go | 79 +- pkg/querier/frontend/config.go | 111 + .../frontend/downstream_roundtripper.go | 40 + pkg/querier/frontend/frontend.go | 191 +- pkg/querier/frontend/frontend_test.go | 39 +- pkg/querier/frontend/handler.go | 172 ++ pkg/querier/frontend/worker.go | 13 +- pkg/querier/frontend2/dns_watcher.go | 80 + pkg/querier/frontend2/frontend.pb.go | 2309 +++++++++++++++++ pkg/querier/frontend2/frontend.proto | 94 + pkg/querier/frontend2/frontend2.go | 290 +++ pkg/querier/frontend2/frontend2_test.go | 258 ++ .../frontend2/frontend_querier_queues.go | 224 ++ .../frontend2/frontend_querier_queues_test.go | 291 +++ .../frontend2/frontend_scheduler_worker.go | 303 +++ .../frontend2/querier_scheduler_worker.go | 434 ++++ pkg/querier/frontend2/scheduler.go | 510 ++++ pkg/querier/frontend2/scheduler_test.go | 379 +++ pkg/querier/queryrange/retry.go | 4 +- pkg/querier/queryrange/roundtrip.go | 20 +- pkg/util/fakeauth/fake_auth.go | 22 +- pkg/util/validation/limits.go | 2 +- tools/doc-generator/main.go | 4 +- 33 files changed, 5873 insertions(+), 304 deletions(-) create mode 100644 pkg/querier/frontend/config.go create mode 100644 pkg/querier/frontend/downstream_roundtripper.go create mode 100644 pkg/querier/frontend/handler.go create mode 100644 pkg/querier/frontend2/dns_watcher.go create mode 100644 pkg/querier/frontend2/frontend.pb.go create mode 100644 pkg/querier/frontend2/frontend.proto create mode 100644 pkg/querier/frontend2/frontend2.go create mode 100644 pkg/querier/frontend2/frontend2_test.go create mode 100644 pkg/querier/frontend2/frontend_querier_queues.go create mode 100644 pkg/querier/frontend2/frontend_querier_queues_test.go create mode 100644 pkg/querier/frontend2/frontend_scheduler_worker.go create mode 100644 pkg/querier/frontend2/querier_scheduler_worker.go create mode 100644 pkg/querier/frontend2/scheduler.go create mode 100644 pkg/querier/frontend2/scheduler_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e796d21901..7b302f21db0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,7 @@ * [ENHANCEMENT] Blocks storage: reduced number of bucket listing operations to list block content (applies to newly created blocks only). #3363 * [ENHANCEMENT] Ruler: Include the tenant ID on the notifier logs. #3372 * [ENHANCEMENT] Blocks storage Compactor: Added `-compactor.enabled-tenants` and `-compactor.disabled-tenants` to explicitly enable or disable compaction of specific tenants. #3385 +* [ENHANCEMENT] Query-Frontend / Query-Scheduler: New component called "Query-Scheduler" has been introduced. Query-Scheduler is simply a queue of requests, moved outside of Query-Frontend. This allows Query-Frontend to be scaled separately from number of queues. 
To make Query-Frontend and Querier use Query-Scheduler, they need to be started with `-frontend.scheduler-address` and `-querier.scheduler-address` options respectively. #3374 * [BUGFIX] No-longer-needed ingester operations for queries triggered by queriers and rulers are now canceled. #3178 * [BUGFIX] Ruler: directories in the configured `rules-path` will be removed on startup and shutdown in order to ensure they don't persist between runs. #3195 * [BUGFIX] Handle hash-collisions in the query path. #3192 diff --git a/development/tsdb-blocks-storage-s3/config/cortex.yaml b/development/tsdb-blocks-storage-s3/config/cortex.yaml index cd1fc9117a1..7fbe1128938 100644 --- a/development/tsdb-blocks-storage-s3/config/cortex.yaml +++ b/development/tsdb-blocks-storage-s3/config/cortex.yaml @@ -120,6 +120,10 @@ store_gateway: frontend_worker: frontend_address: "query-frontend:9007" + match_max_concurrent: true + + # By setting scheduler_address, querier worker would use scheduler instead of frontend. + # scheduler_address: "query-scheduler:9012" query_range: split_queries_by_interval: 24h diff --git a/development/tsdb-blocks-storage-s3/config/grafana-agent.yaml b/development/tsdb-blocks-storage-s3/config/grafana-agent.yaml index 245cc90d858..164b409852b 100644 --- a/development/tsdb-blocks-storage-s3/config/grafana-agent.yaml +++ b/development/tsdb-blocks-storage-s3/config/grafana-agent.yaml @@ -25,7 +25,7 @@ prometheus: namespace: 'tsdb-blocks-storage-s3' - job_name: tsdb-blocks-storage-s3/querier static_configs: - - targets: ['querier:8004'] + - targets: ['querier:8004', 'querier-with-scheduler:8013'] labels: cluster: 'docker-compose' namespace: 'tsdb-blocks-storage-s3' @@ -43,7 +43,7 @@ prometheus: namespace: 'tsdb-blocks-storage-s3' - job_name: tsdb-blocks-storage-s3/query-frontend static_configs: - - targets: ['query-frontend:8007'] + - targets: ['query-frontend:8007', 'query-frontend-with-scheduler:8012'] labels: cluster: 'docker-compose' namespace: 'tsdb-blocks-storage-s3' @@ -53,6 +53,12 @@ prometheus: labels: cluster: 'docker-compose' namespace: 'tsdb-blocks-storage-s3' + - job_name: tsdb-blocks-storage-s3/query-scheduler + static_configs: + - targets: ['query-scheduler:8011'] + labels: + cluster: 'docker-compose' + namespace: 'tsdb-blocks-storage-s3' remote_write: - url: http://distributor:8001/api/prom/push diff --git a/development/tsdb-blocks-storage-s3/config/prometheus.yaml b/development/tsdb-blocks-storage-s3/config/prometheus.yaml index ae916087aba..be12319f077 100644 --- a/development/tsdb-blocks-storage-s3/config/prometheus.yaml +++ b/development/tsdb-blocks-storage-s3/config/prometheus.yaml @@ -18,7 +18,7 @@ scrape_configs: namespace: 'tsdb-blocks-storage-s3' - job_name: tsdb-blocks-storage-s3/querier static_configs: - - targets: ['querier:8004'] + - targets: ['querier:8004', 'query-frontend-with-scheduler:8013'] labels: cluster: 'docker-compose' namespace: 'tsdb-blocks-storage-s3' @@ -36,7 +36,7 @@ scrape_configs: namespace: 'tsdb-blocks-storage-s3' - job_name: tsdb-blocks-storage-s3/query-frontend static_configs: - - targets: ['query-frontend:8007'] + - targets: ['query-frontend:8007', 'query-frontend-with-scheduler:8012'] labels: cluster: 'docker-compose' namespace: 'tsdb-blocks-storage-s3' @@ -46,6 +46,12 @@ scrape_configs: labels: cluster: 'docker-compose' namespace: 'tsdb-blocks-storage-s3' + - job_name: tsdb-blocks-storage-s3/query-scheduler + static_configs: + - targets: ['query-scheduler:8011'] + labels: + cluster: 'docker-compose' + namespace: 'tsdb-blocks-storage-s3' 
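For reference, a minimal sketch (not part of this change) of the wiring described in the CHANGELOG entry above: the query-frontend and the querier are pointed at the query-scheduler through `-frontend.scheduler-address` and `-querier.scheduler-address`, or their YAML equivalents shown here. The `query-scheduler:9011` address is illustrative and mirrors the docker-compose example further below.

```yaml
# Query-frontend: enqueue requests on the external query-scheduler
# instead of the frontend's internal queue.
frontend:
  scheduler_address: "query-scheduler:9011"

# Querier: pull queries from the query-scheduler rather than from a query-frontend.
frontend_worker:
  scheduler_address: "query-scheduler:9011"
```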
remote_write: - url: http://distributor:8001/api/prom/push diff --git a/development/tsdb-blocks-storage-s3/docker-compose.yml b/development/tsdb-blocks-storage-s3/docker-compose.yml index d9313f6f883..c610c8699c3 100644 --- a/development/tsdb-blocks-storage-s3/docker-compose.yml +++ b/development/tsdb-blocks-storage-s3/docker-compose.yml @@ -271,3 +271,68 @@ services: - 18022:18022 volumes: - ./config:/cortex/config + + query-scheduler: + build: + context: . + dockerfile: dev.dockerfile + image: cortex + command: ["sh", "-c", "sleep 3 && exec ./dlv exec ./cortex --listen=:18011 --headless=true --api-version=2 --accept-multiclient --continue -- -config.file=./config/cortex.yaml -target=query-scheduler -server.http-listen-port=8011 -server.grpc-listen-port=9011 -store.max-query-length=8760h -log.level=debug"] + depends_on: + - consul + - minio + environment: + - JAEGER_AGENT_HOST=jaeger + - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=query-scheduler + - JAEGER_SAMPLER_TYPE=const + - JAEGER_SAMPLER_PARAM=1 + ports: + - 8011:8011 + - 18011:18011 + volumes: + - ./config:/cortex/config + + # This frontend uses query-scheduler, activated by `-frontend.scheduler-address` option. + query-frontend-with-scheduler: + build: + context: . + dockerfile: dev.dockerfile + image: cortex + command: ["sh", "-c", "sleep 3 && exec ./dlv exec ./cortex --listen=:18012 --headless=true --api-version=2 --accept-multiclient --continue -- -config.file=./config/cortex.yaml -target=query-frontend -server.http-listen-port=8012 -server.grpc-listen-port=9012 -store.max-query-length=8760h -frontend.scheduler-address=query-scheduler:9011 -log.level=debug"] + depends_on: + - consul + - minio + environment: + - JAEGER_AGENT_HOST=jaeger + - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=query-frontend2 + - JAEGER_SAMPLER_TYPE=const + - JAEGER_SAMPLER_PARAM=1 + ports: + - 8012:8012 + - 18012:18012 + volumes: + - ./config:/cortex/config + + # This querier is connecting to query-scheduler, instead of query-frontend. This is achieved by setting -querier.scheduler-address="..." + querier-with-scheduler: + build: + context: . + dockerfile: dev.dockerfile + image: cortex + command: ["sh", "-c", "sleep 3 && exec ./dlv exec ./cortex --listen=:18013 --headless=true --api-version=2 --accept-multiclient --continue -- -config.file=./config/cortex.yaml -target=querier -server.http-listen-port=8013 -server.grpc-listen-port=9013 -querier.scheduler-address=query-scheduler:9011 -log.level=debug"] + depends_on: + - consul + - minio + environment: + - JAEGER_AGENT_HOST=jaeger + - JAEGER_AGENT_PORT=6831 + - JAEGER_TAGS=app=querier-scheduler + - JAEGER_SAMPLER_TYPE=const + - JAEGER_SAMPLER_PARAM=1 + ports: + - 8013:8013 + - 18013:18013 + volumes: + - ./config:/cortex/config diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 8518828b919..fafc24f0c92 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -157,6 +157,12 @@ runtime_config: # The memberlist_config configures the Gossip memberlist. [memberlist: ] + +query_scheduler: + # Maximum number of outstanding requests per tenant per query-scheduler; + # requests beyond this error with HTTP 429. + # CLI flag: -query-scheduler.max-outstanding-requests-per-tenant + [maxoutstandingpertenant: | default = 100] ``` ### `server_config` @@ -757,11 +763,99 @@ store_gateway_client: The `query_frontend_config` configures the Cortex query-frontend. 
```yaml +# Log queries that are slower than the specified duration. Set to 0 to disable. +# Set to < 0 to enable on all queries. +# CLI flag: -frontend.log-queries-longer-than +[log_queries_longer_than: | default = 0s] + +# Max body size for downstream prometheus. +# CLI flag: -frontend.max-body-size +[max_body_size: | default = 10485760] + # Maximum number of outstanding requests per tenant per frontend; requests # beyond this error with HTTP 429. # CLI flag: -querier.max-outstanding-requests-per-tenant [max_outstanding_per_tenant: | default = 100] +# DNS hostname used for finding schedulers. +# CLI flag: -frontend.scheduler-address +[scheduler_address: | default = ""] + +# How often to query DNS. +# CLI flag: -frontend.scheduler-dns-lookup-period +[scheduler_dns_lookup_period: | default = 10s] + +# Number of goroutines pushing requests to +# CLI flag: -frontend.scheduler-worker-concurrency +[scheduler_worker_concurrency: | default = 5] + +grpc_client_config: + # gRPC client max receive message size (bytes). + # CLI flag: -frontend.grpc-client-config.grpc-max-recv-msg-size + [max_recv_msg_size: | default = 104857600] + + # gRPC client max send message size (bytes). + # CLI flag: -frontend.grpc-client-config.grpc-max-send-msg-size + [max_send_msg_size: | default = 16777216] + + # Deprecated: Use gzip compression when sending messages. If true, overrides + # grpc-compression flag. + # CLI flag: -frontend.grpc-client-config.grpc-use-gzip-compression + [use_gzip_compression: | default = false] + + # Use compression when sending messages. Supported values are: 'gzip', + # 'snappy' and '' (disable compression) + # CLI flag: -frontend.grpc-client-config.grpc-compression + [grpc_compression: | default = ""] + + # Rate limit for gRPC client; 0 means disabled. + # CLI flag: -frontend.grpc-client-config.grpc-client-rate-limit + [rate_limit: | default = 0] + + # Rate limit burst for gRPC client. + # CLI flag: -frontend.grpc-client-config.grpc-client-rate-limit-burst + [rate_limit_burst: | default = 0] + + # Enable backoff and retry when we hit ratelimits. + # CLI flag: -frontend.grpc-client-config.backoff-on-ratelimits + [backoff_on_ratelimits: | default = false] + + backoff_config: + # Minimum delay when backing off. + # CLI flag: -frontend.grpc-client-config.backoff-min-period + [min_period: | default = 100ms] + + # Maximum delay when backing off. + # CLI flag: -frontend.grpc-client-config.backoff-max-period + [max_period: | default = 10s] + + # Number of times to backoff and retry before failing. + # CLI flag: -frontend.grpc-client-config.backoff-retries + [max_retries: | default = 10] + + # Path to the client certificate file, which will be used for authenticating + # with the server. Also requires the key path to be configured. + # CLI flag: -frontend.grpc-client-config.tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the client + # certificate to be configured. + # CLI flag: -frontend.grpc-client-config.tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate against. If + # not set, the host's root CA certificates are used. + # CLI flag: -frontend.grpc-client-config.tls-ca-path + [tls_ca_path: | default = ""] + + # Skip validating server certificate. + # CLI flag: -frontend.grpc-client-config.tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + +# Name of network interface to read address from. 
+# CLI flag: -frontend.interface +[interface_names: | default = [eth0 en0]] + # Compress HTTP responses. # CLI flag: -querier.compress-http-responses [compress_responses: | default = false] @@ -769,15 +863,6 @@ The `query_frontend_config` configures the Cortex query-frontend. # URL of downstream Prometheus. # CLI flag: -frontend.downstream-url [downstream_url: | default = ""] - -# Max body size for downstream prometheus. -# CLI flag: -frontend.max-body-size -[max_body_size: | default = 10485760] - -# Log queries that are slower than the specified duration. Set to 0 to disable. -# Set to < 0 to enable on all queries. -# CLI flag: -frontend.log-queries-longer-than -[log_queries_longer_than: | default = 0s] ``` ### `query_range_config` @@ -2446,7 +2531,10 @@ grpc_client_config: The `frontend_worker_config` configures the worker - running within the Cortex querier - picking up and executing queries enqueued by the query-frontend. ```yaml -# Address of query frontend service, in host:port format. +# Address of query frontend service, in host:port format. If +# -querier.scheduler-address is set as well, querier will use scheduler instead. +# If neither -querier.frontend-address or -querier.scheduler-address is set, +# queries must arrive via HTTP endpoint. # CLI flag: -querier.frontend-address [frontend_address: | default = ""] @@ -2530,6 +2618,16 @@ grpc_client_config: # Skip validating server certificate. # CLI flag: -querier.frontend-client.tls-insecure-skip-verify [tls_insecure_skip_verify: | default = false] + +# Hostname (and port) of scheduler that querier will periodically resolve, +# connect to and receive queries from. If set, takes precedence over +# -querier.frontend-address. +# CLI flag: -querier.scheduler-address +[scheduler_address: | default = ""] + +# How often to resolve scheduler hostname. +# CLI flag: -querier.scheduler-dns-lookup-period +[scheduler_dns_lookup_period: | default = 10s] ``` ### `etcd_config` @@ -2895,10 +2993,11 @@ The `limits_config` configures default and per-tenant limits imposed by Cortex s # Maximum number of queriers that can handle requests for a single tenant. If # set to 0 or value higher than number of available queriers, *all* queriers -# will handle requests for the tenant. Each frontend will select the same set of -# queriers for the same tenant (given that all queriers are connected to all -# frontends). This option only works with queriers connecting to the -# query-frontend, not when using downstream URL. +# will handle requests for the tenant. Each frontend (or query-scheduler, if +# used) will select the same set of queriers for the same tenant (given that all +# queriers are connected to all frontends / query-schedulers). This option only +# works with queriers connecting to the query-frontend / query-scheduler, not +# when using downstream URL. # CLI flag: -frontend.max-queriers-per-tenant [max_queriers_per_tenant: | default = 0] diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md index 3a7bda26042..e9251688874 100644 --- a/docs/configuration/v1-guarantees.md +++ b/docs/configuration/v1-guarantees.md @@ -53,3 +53,4 @@ Currently experimental features are: - Blocksconvert tools - OpenStack Swift storage support. - Metric relabeling in the distributor. 
+- Scalable query-frontend (when using query-scheduler) diff --git a/docs/guides/shuffle-sharding.md b/docs/guides/shuffle-sharding.md index 7bdfa001b04..7a5e9568d20 100644 --- a/docs/guides/shuffle-sharding.md +++ b/docs/guides/shuffle-sharding.md @@ -80,7 +80,7 @@ _The shard size can be overridden on a per-tenant basis in the limits overrides By default all Cortex queriers can execute received queries for given tenant. -When shuffle sharding is **enabled** by setting `-frontend.max-queriers-per-tenant` (or its respective YAML config option) to a value higher than 0 and lower than the number of available queriers, only specified number of queriers will execute queries for single tenant. Note that this distribution happens in query-frontend. When not using query-frontend, this option is not available. +When shuffle sharding is **enabled** by setting `-frontend.max-queriers-per-tenant` (or its respective YAML config option) to a value higher than 0 and lower than the number of available queriers, only specified number of queriers will execute queries for single tenant. Note that this distribution happens in query-frontend, or query-scheduler if used. When not using query-frontend, this option is not available. _The maximum number of queriers can be overridden on a per-tenant basis in the limits overrides configuration._ diff --git a/pkg/api/api.go b/pkg/api/api.go index faa4aaa0e44..e5ff5823b88 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -22,6 +22,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/frontend" + "github.com/cortexproject/cortex/pkg/querier/frontend2" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ruler" "github.com/cortexproject/cortex/pkg/storegateway" @@ -308,9 +309,21 @@ func (a *API) RegisterQueryAPI(handler http.Handler) { // RegisterQueryFrontend registers the Prometheus routes supported by the // Cortex querier service. Currently this can not be registered simultaneously // with the Querier. 
-func (a *API) RegisterQueryFrontend(f *frontend.Frontend) { +func (a *API) RegisterQueryFrontendHandler(h http.Handler) { + a.RegisterQueryAPI(h) +} + +func (a *API) RegisterQueryFrontend1(f *frontend.Frontend) { frontend.RegisterFrontendServer(a.server.GRPC, f) - a.RegisterQueryAPI(f.Handler()) +} + +func (a *API) RegisterQueryFrontend2(f *frontend2.Frontend2) { + frontend2.RegisterFrontendForQuerierServer(a.server.GRPC, f) +} + +func (a *API) RegisterQueryScheduler(f *frontend2.Scheduler) { + frontend2.RegisterSchedulerForFrontendServer(a.server.GRPC, f) + frontend2.RegisterSchedulerForQuerierServer(a.server.GRPC, f) } // RegisterServiceMapHandler registers the Cortex structs service handler diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index df5036ef757..29b30899641 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -36,6 +36,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/frontend" + "github.com/cortexproject/cortex/pkg/querier/frontend2" "github.com/cortexproject/cortex/pkg/querier/queryrange" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" @@ -77,33 +78,34 @@ type Config struct { PrintConfig bool `yaml:"-"` HTTPPrefix string `yaml:"http_prefix"` - API api.Config `yaml:"api"` - Server server.Config `yaml:"server"` - Distributor distributor.Config `yaml:"distributor"` - Querier querier.Config `yaml:"querier"` - IngesterClient client.Config `yaml:"ingester_client"` - Ingester ingester.Config `yaml:"ingester"` - Flusher flusher.Config `yaml:"flusher"` - Storage storage.Config `yaml:"storage"` - ChunkStore chunk.StoreConfig `yaml:"chunk_store"` - Schema chunk.SchemaConfig `yaml:"schema" doc:"hidden"` // Doc generation tool doesn't support it because part of the SchemaConfig doesn't support CLI flags (needs manual documentation) - LimitsConfig validation.Limits `yaml:"limits"` - Prealloc client.PreallocConfig `yaml:"prealloc" doc:"hidden"` - Worker frontend.WorkerConfig `yaml:"frontend_worker"` - Frontend frontend.Config `yaml:"frontend"` - QueryRange queryrange.Config `yaml:"query_range"` - TableManager chunk.TableManagerConfig `yaml:"table_manager"` - Encoding encoding.Config `yaml:"-"` // No yaml for this, it only works with flags. 
- BlocksStorage tsdb.BlocksStorageConfig `yaml:"blocks_storage"` - Compactor compactor.Config `yaml:"compactor"` - StoreGateway storegateway.Config `yaml:"store_gateway"` - PurgerConfig purger.Config `yaml:"purger"` - - Ruler ruler.Config `yaml:"ruler"` - Configs configs.Config `yaml:"configs"` - Alertmanager alertmanager.MultitenantAlertmanagerConfig `yaml:"alertmanager"` - RuntimeConfig runtimeconfig.ManagerConfig `yaml:"runtime_config"` - MemberlistKV memberlist.KVConfig `yaml:"memberlist"` + API api.Config `yaml:"api"` + Server server.Config `yaml:"server"` + Distributor distributor.Config `yaml:"distributor"` + Querier querier.Config `yaml:"querier"` + IngesterClient client.Config `yaml:"ingester_client"` + Ingester ingester.Config `yaml:"ingester"` + Flusher flusher.Config `yaml:"flusher"` + Storage storage.Config `yaml:"storage"` + ChunkStore chunk.StoreConfig `yaml:"chunk_store"` + Schema chunk.SchemaConfig `yaml:"schema" doc:"hidden"` // Doc generation tool doesn't support it because part of the SchemaConfig doesn't support CLI flags (needs manual documentation) + LimitsConfig validation.Limits `yaml:"limits"` + Prealloc client.PreallocConfig `yaml:"prealloc" doc:"hidden"` + Worker frontend.CombinedWorkerConfig `yaml:"frontend_worker"` + Frontend frontend.CombinedFrontendConfig `yaml:"frontend"` + QueryRange queryrange.Config `yaml:"query_range"` + TableManager chunk.TableManagerConfig `yaml:"table_manager"` + Encoding encoding.Config `yaml:"-"` // No yaml for this, it only works with flags. + BlocksStorage tsdb.BlocksStorageConfig `yaml:"blocks_storage"` + Compactor compactor.Config `yaml:"compactor"` + StoreGateway storegateway.Config `yaml:"store_gateway"` + PurgerConfig purger.Config `yaml:"purger"` + + Ruler ruler.Config `yaml:"ruler"` + Configs configs.Config `yaml:"configs"` + Alertmanager alertmanager.MultitenantAlertmanagerConfig `yaml:"alertmanager"` + RuntimeConfig runtimeconfig.ManagerConfig `yaml:"runtime_config"` + MemberlistKV memberlist.KVConfig `yaml:"memberlist"` + QuerySchedulerConfig frontend2.SchedulerConfig `yaml:"query_scheduler"` } // RegisterFlags registers flag. @@ -149,6 +151,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Alertmanager.RegisterFlags(f) c.RuntimeConfig.RegisterFlags(f) c.MemberlistKV.RegisterFlags(f, "") + c.QuerySchedulerConfig.RegisterFlags(f) // These don't seem to have a home. f.IntVar(&chunk_util.QueryParallelism, "querier.query-parallelism", 100, "Max subqueries run in parallel per higher-level query.") @@ -266,7 +269,7 @@ type Cortex struct { TombstonesLoader *purger.TombstonesLoader QuerierQueryable prom_storage.SampleAndChunkQueryable QuerierEngine *promql.Engine - QueryFrontendTripperware frontend.Tripperware + QueryFrontendTripperware queryrange.Tripperware Ruler *ruler.Ruler RulerStorage rules.RuleStore @@ -293,11 +296,15 @@ func New(cfg Config) (*Cortex, error) { // Don't check auth header on TransferChunks, as we weren't originally // sending it and this could cause transfers to fail on update. - // - // Also don't check auth /frontend.Frontend/Process, as this handles - // queries for multiple users. cfg.API.HTTPAuthMiddleware = fakeauth.SetupAuthMiddleware(&cfg.Server, cfg.AuthEnabled, - []string{"/cortex.Ingester/TransferChunks", "/frontend.Frontend/Process"}) + // Also don't check auth for these gRPC methods, since single call is used for multiple users (or no user like health check). 
+ []string{ + "/grpc.health.v1.Health/Check", + "/cortex.Ingester/TransferChunks", + "/frontend.Frontend/Process", + "/frontend2.SchedulerForFrontend/FrontendLoop", + "/frontend2.SchedulerForQuerier/QuerierLoop", + }) cortex := &Cortex{ Cfg: cfg, diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index eb6e638d349..1eb6714d8d2 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -5,12 +5,13 @@ import ( "os" "time" + "github.com/NYTimes/gziphandler" "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/rules" prom_storage "github.com/prometheus/prometheus/storage" - httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" "github.com/weaveworks/common/middleware" "github.com/weaveworks/common/server" @@ -27,6 +28,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/frontend" + "github.com/cortexproject/cortex/pkg/querier/frontend2" "github.com/cortexproject/cortex/pkg/querier/queryrange" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/codec" @@ -69,6 +71,7 @@ const ( StoreGateway string = "store-gateway" MemberlistKV string = "memberlist-kv" Purger string = "purger" + QueryScheduler string = "query-scheduler" All string = "all" ) @@ -267,10 +270,10 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) { util.Logger, ) - // If the querier is running standalone without the query-frontend, we must register it's internal + // If the querier is running standalone without the query-frontend or query-scheduler, we must register it's internal // HTTP handler externally and provide the external Cortex Server HTTP handler to the frontend worker // to ensure requests it processes use the default middleware instrumentation. - if !t.Cfg.isModuleEnabled(QueryFrontend) && !t.Cfg.isModuleEnabled(All) { + if !t.Cfg.isModuleEnabled(QueryFrontend) && !t.Cfg.isModuleEnabled(QueryScheduler) && !t.Cfg.isModuleEnabled(All) { // First, register the internal querier handler with the external HTTP server t.API.RegisterQueryAPI(internalQuerierRouter) @@ -279,12 +282,12 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) { // and internal using the default instrumentation when running as a standalone service. internalQuerierRouter = t.Server.HTTPServer.Handler } else { - // Single binary mode requires a query frontend endpoint for the worker. If no frontend endpoint - // is configured, Cortex will default to using localhost on it's own GRPC listening port. - if t.Cfg.Worker.Address == "" { + // Single binary mode requires a query frontend endpoint for the worker. If no frontend or scheduler endpoint + // is configured, Cortex will default to using frontend on localhost on it's own GRPC listening port. + if t.Cfg.Worker.WorkerV1.FrontendAddress == "" || t.Cfg.Worker.WorkerV2.SchedulerAddr == "" { address := fmt.Sprintf("127.0.0.1:%d", t.Cfg.Server.GRPCListenPort) level.Warn(util.Logger).Log("msg", "Worker address is empty in single binary mode. Attempting automatic worker configuration. 
If queries are unresponsive consider configuring the worker explicitly.", "address", address) - t.Cfg.Worker.Address = address + t.Cfg.Worker.WorkerV1.FrontendAddress = address } // If queries are processed using the external HTTP Server, we need wrap the internal querier with @@ -293,14 +296,8 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) { internalQuerierRouter = middleware.AuthenticateUser.Wrap(internalQuerierRouter) } - // Query frontend worker will only be started after all its dependencies are started, not here. - // Worker may also be nil, if not configured, which is OK. - worker, err := frontend.NewWorker(t.Cfg.Worker, t.Cfg.Querier, httpgrpc_server.NewServer(internalQuerierRouter), util.Logger) - if err != nil { - return nil, err - } - - return worker, nil + // If neither frontend address or scheduler address is configured, no worker will be created. + return t.Cfg.Worker.InitQuerierWorker(t.Cfg.Querier, internalQuerierRouter, util.Logger) } func (t *Cortex) initStoreQueryables() (services.Service, error) { @@ -463,9 +460,9 @@ func (t *Cortex) initDeleteRequestsStore() (serv services.Service, err error) { func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err error) { // Load the schema only if sharded queries is set. if t.Cfg.QueryRange.ShardedQueries { - err = t.Cfg.Schema.Load() + err := t.Cfg.Schema.Load() if err != nil { - return + return nil, err } } @@ -506,18 +503,36 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro } func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { - t.Frontend, err = frontend.New(t.Cfg.Frontend, t.Overrides, util.Logger, prometheus.DefaultRegisterer) + roundTripper, frontendV1, frontendV2, err := t.Cfg.Frontend.InitFrontend(t.Overrides, t.Cfg.Server.GRPCListenPort, util.Logger, prometheus.DefaultRegisterer) if err != nil { - return + return nil, err } - t.Frontend.Wrap(t.QueryFrontendTripperware) - t.API.RegisterQueryFrontend(t.Frontend) + // Wrap roundtripper into Tripperware. 
+ t.QueryFrontendTripperware(roundTripper) - return services.NewIdleService(nil, func(_ error) error { - t.Frontend.Close() - return nil - }), nil + handler := frontend.NewHandler(t.Cfg.Frontend.Handler, roundTripper, util.Logger) + if t.Cfg.Frontend.CompressResponses { + handler = gziphandler.GzipHandler(handler) + } + + t.API.RegisterQueryFrontendHandler(handler) + + if frontendV1 != nil { + t.API.RegisterQueryFrontend1(frontendV1) + t.Frontend = frontendV1 + + return services.NewIdleService(nil, func(_ error) error { + frontendV1.Close() + return nil + }), nil + } else if frontendV2 != nil { + t.API.RegisterQueryFrontend2(frontendV2) + + return frontendV2, nil + } + + return nil, nil } func (t *Cortex) initTableManager() (services.Service, error) { @@ -723,6 +738,16 @@ func (t *Cortex) initPurger() (services.Service, error) { return t.Purger, nil } +func (t *Cortex) initQueryScheduler() (services.Service, error) { + s, err := frontend2.NewScheduler(t.Cfg.QuerySchedulerConfig, t.Overrides, util.Logger, prometheus.DefaultRegisterer) + if err != nil { + return nil, errors.Wrap(err, "query-scheduler init") + } + + t.API.RegisterQueryScheduler(s) + return s, nil +} + func (t *Cortex) setupModuleManager() error { mm := modules.NewManager() @@ -754,6 +779,7 @@ func (t *Cortex) setupModuleManager() error { mm.RegisterModule(Compactor, t.initCompactor) mm.RegisterModule(StoreGateway, t.initStoreGateway) mm.RegisterModule(Purger, t.initPurger) + mm.RegisterModule(QueryScheduler, t.initQueryScheduler) mm.RegisterModule(All, nil) // Add dependencies @@ -770,8 +796,9 @@ func (t *Cortex) setupModuleManager() error { Queryable: {Overrides, DistributorService, Store, Ring, API, StoreQueryable, MemberlistKV}, Querier: {Queryable}, StoreQueryable: {Overrides, Store, MemberlistKV}, - QueryFrontend: {QueryFrontendTripperware}, QueryFrontendTripperware: {API, Overrides, DeleteRequestsStore}, + QueryFrontend: {QueryFrontendTripperware}, + QueryScheduler: {API, Overrides}, TableManager: {API}, Ruler: {Overrides, DistributorService, Store, StoreQueryable, RulerStorage}, Configs: {API}, diff --git a/pkg/querier/frontend/config.go b/pkg/querier/frontend/config.go new file mode 100644 index 00000000000..697dc3cad4a --- /dev/null +++ b/pkg/querier/frontend/config.go @@ -0,0 +1,111 @@ +package frontend + +import ( + "flag" + "net/http" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" + + "github.com/cortexproject/cortex/pkg/querier" + "github.com/cortexproject/cortex/pkg/querier/frontend2" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/services" +) + +// This struct combines several configuration options together to preserve backwards compatibility. 
+type CombinedFrontendConfig struct { + Handler HandlerConfig `yaml:",inline"` + Frontend Config `yaml:",inline"` + Frontend2 frontend2.Config `yaml:",inline"` + + CompressResponses bool `yaml:"compress_responses"` + DownstreamURL string `yaml:"downstream_url"` +} + +func (cfg *CombinedFrontendConfig) RegisterFlags(f *flag.FlagSet) { + cfg.Handler.RegisterFlags(f) + cfg.Frontend.RegisterFlags(f) + cfg.Frontend2.RegisterFlags(f) + + f.BoolVar(&cfg.CompressResponses, "querier.compress-http-responses", false, "Compress HTTP responses.") + f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Prometheus.") +} + +func (cfg *CombinedFrontendConfig) InitFrontend(limits Limits, grpcListenPort int, log log.Logger, reg prometheus.Registerer) (http.RoundTripper, *Frontend, *frontend2.Frontend2, error) { + switch { + case cfg.DownstreamURL != "": + // If the user has specified a downstream Prometheus, then we should use that. + rt, err := NewDownstreamRoundTripper(cfg.DownstreamURL) + return rt, nil, nil, err + + case cfg.Frontend2.SchedulerAddr != "": + // If query-scheduler address is configured, use Frontend2. + if cfg.Frontend2.Addr == "" { + addr, err := util.GetFirstAddressOf(cfg.Frontend2.InfNames) + if err != nil { + return nil, nil, nil, errors.Wrap(err, "failed to get frontend address") + } + + cfg.Frontend2.Addr = addr + } + + if cfg.Frontend2.Port == 0 { + cfg.Frontend2.Port = grpcListenPort + } + + fr, err := frontend2.NewFrontend2(cfg.Frontend2, log, reg) + return AdaptGrpcRoundTripperToHTTPRoundTripper(fr), nil, fr, err + + default: + // No scheduler = use original frontend. + fr, err := New(cfg.Frontend, limits, log, reg) + if err != nil { + return nil, nil, nil, err + } + + return AdaptGrpcRoundTripperToHTTPRoundTripper(fr), fr, nil, err + } +} + +// Configuration for both querier workers, V1 (using frontend) and V2 (using scheduler). Since many flags are reused +// between the two, they are exposed to YAML/CLI in V1 version (WorkerConfig), and copied to V2 in the init method. +type CombinedWorkerConfig struct { + WorkerV1 WorkerConfig `yaml:",inline"` + WorkerV2 frontend2.QuerierWorkersConfig `yaml:",inline"` +} + +func (cfg *CombinedWorkerConfig) RegisterFlags(f *flag.FlagSet) { + cfg.WorkerV1.RegisterFlags(f) + cfg.WorkerV2.RegisterFlags(f) +} + +func (cfg *CombinedWorkerConfig) InitQuerierWorker(querierCfg querier.Config, handler http.Handler, log log.Logger) (services.Service, error) { + switch { + case cfg.WorkerV2.SchedulerAddr != "": + // Copy settings from querier v1 config struct. + cfg.WorkerV2.GRPCClientConfig = cfg.WorkerV1.GRPCClientConfig + cfg.WorkerV2.MatchMaxConcurrency = cfg.WorkerV1.MatchMaxConcurrency + cfg.WorkerV2.MaxConcurrentRequests = querierCfg.MaxConcurrent + cfg.WorkerV2.Parallelism = cfg.WorkerV1.Parallelism + cfg.WorkerV2.QuerierID = cfg.WorkerV1.QuerierID + + level.Info(log).Log("msg", "Starting querier worker v2 with scheduler", "scheduler", cfg.WorkerV2.SchedulerAddr) + return frontend2.NewQuerierSchedulerWorkers(cfg.WorkerV2, httpgrpc_server.NewServer(handler), prometheus.DefaultRegisterer, log) + + case cfg.WorkerV1.FrontendAddress != "": + level.Info(log).Log("msg", "Starting querier worker v1 with frontend", "frontend", cfg.WorkerV1.FrontendAddress) + return NewWorker(cfg.WorkerV1, querierCfg, httpgrpc_server.NewServer(handler), log) + + default: + // No querier worker is necessary, querier will receive queries directly from HTTP server. 
+ return nil, nil + } +} + +func (cfg *CombinedWorkerConfig) Validate(logger log.Logger) error { + return cfg.WorkerV1.Validate(logger) +} diff --git a/pkg/querier/frontend/downstream_roundtripper.go b/pkg/querier/frontend/downstream_roundtripper.go new file mode 100644 index 00000000000..f0c342eac4a --- /dev/null +++ b/pkg/querier/frontend/downstream_roundtripper.go @@ -0,0 +1,40 @@ +package frontend + +import ( + "net/http" + "net/url" + "path" + + "github.com/opentracing/opentracing-go" +) + +// RoundTripper that forwards requests to downstream URL. +type downstreamRoundTripper struct { + downstreamURL *url.URL +} + +func NewDownstreamRoundTripper(downstreamURL string) (http.RoundTripper, error) { + u, err := url.Parse(downstreamURL) + if err != nil { + return nil, err + } + + return &downstreamRoundTripper{downstreamURL: u}, nil +} + +func (d downstreamRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(r.Context()) + if tracer != nil && span != nil { + carrier := opentracing.HTTPHeadersCarrier(r.Header) + err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier) + if err != nil { + return nil, err + } + } + + r.URL.Scheme = d.downstreamURL.Scheme + r.URL.Host = d.downstreamURL.Host + r.URL.Path = path.Join(d.downstreamURL.Path, r.URL.Path) + r.Host = "" + return http.DefaultTransport.RoundTrip(r) +} diff --git a/pkg/querier/frontend/frontend.go b/pkg/querier/frontend/frontend.go index 94235e1d9f3..c9fc228d688 100644 --- a/pkg/querier/frontend/frontend.go +++ b/pkg/querier/frontend/frontend.go @@ -1,63 +1,36 @@ package frontend import ( - "bytes" "context" "errors" "flag" "fmt" - "io" - "io/ioutil" "net/http" - "net/url" - "path" - "strings" "sync" "time" - "github.com/NYTimes/gziphandler" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/httpgrpc/server" "github.com/weaveworks/common/user" "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/util" -) - -const ( - // StatusClientClosedRequest is the status code for when a client request cancellation of an http request - StatusClientClosedRequest = 499 - defaultMaxBodySize = 10 * 1024 * 1024 // 10 MiB ) var ( - errTooManyRequest = httpgrpc.Errorf(http.StatusTooManyRequests, "too many outstanding requests") - errCanceled = httpgrpc.Errorf(StatusClientClosedRequest, context.Canceled.Error()) - errDeadlineExceeded = httpgrpc.Errorf(http.StatusGatewayTimeout, context.DeadlineExceeded.Error()) - errRequestEntityTooLarge = httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "http: request body too large") + errTooManyRequest = httpgrpc.Errorf(http.StatusTooManyRequests, "too many outstanding requests") ) // Config for a Frontend. type Config struct { - MaxOutstandingPerTenant int `yaml:"max_outstanding_per_tenant"` - CompressResponses bool `yaml:"compress_responses"` - DownstreamURL string `yaml:"downstream_url"` - MaxBodySize int64 `yaml:"max_body_size"` - LogQueriesLongerThan time.Duration `yaml:"log_queries_longer_than"` + MaxOutstandingPerTenant int `yaml:"max_outstanding_per_tenant"` } // RegisterFlags adds the flags required to config this to the given FlagSet. 
func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.MaxOutstandingPerTenant, "querier.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per frontend; requests beyond this error with HTTP 429.") - f.BoolVar(&cfg.CompressResponses, "querier.compress-http-responses", false, "Compress HTTP responses.") - f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Prometheus.") - f.Int64Var(&cfg.MaxBodySize, "frontend.max-body-size", defaultMaxBodySize, "Max body size for downstream prometheus.") - f.DurationVar(&cfg.LogQueriesLongerThan, "frontend.log-queries-longer-than", 0, "Log queries that are slower than the specified duration. Set to 0 to disable. Set to < 0 to enable on all queries.") } type Limits interface { @@ -68,10 +41,9 @@ type Limits interface { // Frontend queues HTTP requests, dispatches them to backends, and handles retries // for requests which failed. type Frontend struct { - cfg Config - log log.Logger - roundTripper http.RoundTripper - limits Limits + cfg Config + log log.Logger + limits Limits mtx sync.Mutex cond *sync.Cond // Notified when request is enqueued or dequeued, or querier is disconnected. @@ -123,48 +95,9 @@ func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Regist } f.cond = sync.NewCond(&f.mtx) - // The front end implements http.RoundTripper using a GRPC worker queue by default. - f.roundTripper = f - // However if the user has specified a downstream Prometheus, then we should use that. - if cfg.DownstreamURL != "" { - u, err := url.Parse(cfg.DownstreamURL) - if err != nil { - return nil, err - } - - f.roundTripper = RoundTripFunc(func(r *http.Request) (*http.Response, error) { - tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(r.Context()) - if tracer != nil && span != nil { - carrier := opentracing.HTTPHeadersCarrier(r.Header) - tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier) - } - r.URL.Scheme = u.Scheme - r.URL.Host = u.Host - r.URL.Path = path.Join(u.Path, r.URL.Path) - r.Host = "" - return http.DefaultTransport.RoundTrip(r) - }) - } - return f, nil } -// Wrap uses a Tripperware to chain a new RoundTripper to the frontend. -func (f *Frontend) Wrap(trw Tripperware) { - f.roundTripper = trw(f.roundTripper) -} - -// Tripperware is a signature for all http client-side middleware. -type Tripperware func(http.RoundTripper) http.RoundTripper - -// RoundTripFunc is to http.RoundTripper what http.HandlerFunc is to http.Handler. -type RoundTripFunc func(*http.Request) (*http.Response, error) - -// RoundTrip implements http.RoundTripper. -func (f RoundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) { - return f(r) -} - // Close stops new requests and errors out any pending requests. func (f *Frontend) Close() { f.mtx.Lock() @@ -174,112 +107,6 @@ func (f *Frontend) Close() { } } -// Handler for HTTP requests. -func (f *Frontend) Handler() http.Handler { - if f.cfg.CompressResponses { - return gziphandler.GzipHandler(http.HandlerFunc(f.handle)) - } - return http.HandlerFunc(f.handle) -} - -func (f *Frontend) handle(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - // Buffer the body for later use to track slow queries. 
- var buf bytes.Buffer - r.Body = http.MaxBytesReader(w, r.Body, f.cfg.MaxBodySize) - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &buf)) - - startTime := time.Now() - resp, err := f.roundTripper.RoundTrip(r) - queryResponseTime := time.Since(startTime) - - if err != nil { - writeError(w, err) - return - } - - hs := w.Header() - for h, vs := range resp.Header { - hs[h] = vs - } - - w.WriteHeader(resp.StatusCode) - // we don't check for copy error as there is no much we can do at this point - io.Copy(w, resp.Body) - - f.reportSlowQuery(queryResponseTime, r, buf) -} - -// reportSlowQuery reprots slow queries if LogQueriesLongerThan is set to <0, where 0 disables logging -func (f *Frontend) reportSlowQuery(queryResponseTime time.Duration, r *http.Request, bodyBuf bytes.Buffer) { - if f.cfg.LogQueriesLongerThan == 0 || queryResponseTime <= f.cfg.LogQueriesLongerThan { - return - } - - logMessage := []interface{}{ - "msg", "slow query detected", - "method", r.Method, - "host", r.Host, - "path", r.URL.Path, - "time_taken", queryResponseTime.String(), - } - - // use previously buffered body - r.Body = ioutil.NopCloser(&bodyBuf) - - // Ensure the form has been parsed so all the parameters are present - err := r.ParseForm() - if err != nil { - level.Warn(util.WithContext(r.Context(), f.log)).Log("msg", "unable to parse form for request", "err", err) - } - - // Attempt to iterate through the Form to log any filled in values - for k, v := range r.Form { - logMessage = append(logMessage, fmt.Sprintf("param_%s", k), strings.Join(v, ",")) - } - - level.Info(util.WithContext(r.Context(), f.log)).Log(logMessage...) - -} - -func writeError(w http.ResponseWriter, err error) { - switch err { - case context.Canceled: - err = errCanceled - case context.DeadlineExceeded: - err = errDeadlineExceeded - default: - if strings.Contains(err.Error(), "http: request body too large") { - err = errRequestEntityTooLarge - } - } - server.WriteError(w, err) -} - -// RoundTrip implement http.Transport. -func (f *Frontend) RoundTrip(r *http.Request) (*http.Response, error) { - req, err := server.HTTPRequest(r) - if err != nil { - return nil, err - } - - resp, err := f.RoundTripGRPC(r.Context(), req) - if err != nil { - return nil, err - } - - httpResp := &http.Response{ - StatusCode: int(resp.Code), - Body: ioutil.NopCloser(bytes.NewReader(resp.Body)), - Header: http.Header{}, - } - for _, h := range resp.Headers { - httpResp.Header[h.Key] = h.Values - } - return httpResp, nil -} - type httpgrpcHeadersCarrier httpgrpc.HTTPRequest func (c *httpgrpcHeadersCarrier) Set(key, val string) { @@ -522,12 +349,6 @@ FindQueue: // CheckReady determines if the query frontend is ready. 
Function parameters/return // chosen to match the same method in the ingester func (f *Frontend) CheckReady(_ context.Context) error { - // if the downstream url is configured the query frontend is not aware of the state - // of the queriers and is therefore always ready - if f.cfg.DownstreamURL != "" { - return nil - } - // if we have more than one querier connected we will consider ourselves ready connectedClients := f.connectedClients.Load() if connectedClients > 0 { diff --git a/pkg/querier/frontend/frontend_test.go b/pkg/querier/frontend/frontend_test.go index c62417a364f..a9b20e36e36 100644 --- a/pkg/querier/frontend/frontend_test.go +++ b/pkg/querier/frontend/frontend_test.go @@ -226,22 +226,17 @@ func TestFrontendCancelStatusCode(t *testing.T) { func TestFrontendCheckReady(t *testing.T) { for _, tt := range []struct { name string - downstreamURL string connectedClients int32 msg string readyForRequests bool }{ - {"downstream url is always ready", "super url", 0, "", true}, - {"connected clients are ready", "", 3, "", true}, - {"no url, no clients is not ready", "", 0, "not ready: number of queriers connected to query-frontend is 0", false}, + {"connected clients are ready", 3, "", true}, + {"no url, no clients is not ready", 0, "not ready: number of queriers connected to query-frontend is 0", false}, } { t.Run(tt.name, func(t *testing.T) { f := &Frontend{ connectedClients: atomic.NewInt32(tt.connectedClients), log: log.NewNopLogger(), - cfg: Config{ - DownstreamURL: tt.downstreamURL, - }, } err := f.CheckReady(context.Background()) errMsg := "" @@ -273,7 +268,7 @@ func TestFrontend_LogsSlowQueriesFormValues(t *testing.T) { // Configure the query-frontend with the mocked downstream server. config := defaultFrontendConfig() - config.LogQueriesLongerThan = 1 * time.Microsecond + config.Handler.LogQueriesLongerThan = 1 * time.Microsecond config.DownstreamURL = fmt.Sprintf("http://%s", downstreamListen.Addr()) var buf bytes.Buffer @@ -336,7 +331,7 @@ func TestFrontend_ReturnsRequestBodyTooLargeError(t *testing.T) { // Configure the query-frontend with the mocked downstream server. config := defaultFrontendConfig() config.DownstreamURL = fmt.Sprintf("http://%s", downstreamListen.Addr()) - config.MaxBodySize = 1 + config.Handler.MaxBodySize = 1 test := func(addr string) { data := url.Values{} @@ -368,7 +363,7 @@ func TestFrontend_ReturnsRequestBodyTooLargeError(t *testing.T) { testFrontend(t, config, nil, test, false, nil) } -func testFrontend(t *testing.T, config Config, handler http.Handler, test func(addr string), matchMaxConcurrency bool, l log.Logger) { +func testFrontend(t *testing.T, config CombinedFrontendConfig, handler http.Handler, test func(addr string), matchMaxConcurrency bool, l log.Logger) { logger := log.NewNopLogger() if l != nil { logger = l @@ -386,27 +381,34 @@ func testFrontend(t *testing.T, config Config, handler http.Handler, test func(a // localhost:0 prevents firewall warnings on Mac OS X. grpcListen, err := net.Listen("tcp", "localhost:0") require.NoError(t, err) - workerConfig.Address = grpcListen.Addr().String() + workerConfig.FrontendAddress = grpcListen.Addr().String() httpListen, err := net.Listen("tcp", "localhost:0") require.NoError(t, err) - frontend, err := New(config, limits{}, logger, nil) + rt, v1, v2, err := config.InitFrontend(limits{}, 0, logger, nil) require.NoError(t, err) - defer frontend.Close() + require.NotNil(t, rt) + // v1 will be nil if DownstreamURL is defined. 
+ require.Nil(t, v2) + if v1 != nil { + defer v1.Close() + } grpcServer := grpc.NewServer( grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer())), ) defer grpcServer.GracefulStop() - RegisterFrontendServer(grpcServer, frontend) + if v1 != nil { + RegisterFrontendServer(grpcServer, v1) + } r := mux.NewRouter() r.PathPrefix("/").Handler(middleware.Merge( middleware.AuthenticateUser, middleware.Tracer{}, - ).Wrap(frontend.Handler())) + ).Wrap(NewHandler(config.Handler, rt, logger))) httpServer := http.Server{ Handler: r, @@ -426,9 +428,12 @@ func testFrontend(t *testing.T, config Config, handler http.Handler, test func(a require.NoError(t, services.StopAndAwaitTerminated(context.Background(), worker)) } -func defaultFrontendConfig() Config { - config := Config{} +func defaultFrontendConfig() CombinedFrontendConfig { + config := CombinedFrontendConfig{} flagext.DefaultValues(&config) + flagext.DefaultValues(&config.Handler) + flagext.DefaultValues(&config.Frontend) + flagext.DefaultValues(&config.Frontend2) return config } diff --git a/pkg/querier/frontend/handler.go b/pkg/querier/frontend/handler.go new file mode 100644 index 00000000000..411f71f6920 --- /dev/null +++ b/pkg/querier/frontend/handler.go @@ -0,0 +1,172 @@ +package frontend + +import ( + "bytes" + "context" + "flag" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/httpgrpc/server" + + "github.com/cortexproject/cortex/pkg/util" +) + +const ( + // StatusClientClosedRequest is the status code for when a client request cancellation of an http request + StatusClientClosedRequest = 499 +) + +var ( + errCanceled = httpgrpc.Errorf(StatusClientClosedRequest, context.Canceled.Error()) + errDeadlineExceeded = httpgrpc.Errorf(http.StatusGatewayTimeout, context.DeadlineExceeded.Error()) + errRequestEntityTooLarge = httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "http: request body too large") +) + +// Config for a Handler. +type HandlerConfig struct { + LogQueriesLongerThan time.Duration `yaml:"log_queries_longer_than"` + MaxBodySize int64 `yaml:"max_body_size"` +} + +func (cfg *HandlerConfig) RegisterFlags(f *flag.FlagSet) { + f.DurationVar(&cfg.LogQueriesLongerThan, "frontend.log-queries-longer-than", 0, "Log queries that are slower than the specified duration. Set to 0 to disable. Set to < 0 to enable on all queries.") + f.Int64Var(&cfg.MaxBodySize, "frontend.max-body-size", 10*1024*1024, "Max body size for downstream prometheus.") +} + +// Handler accepts queries and forwards them to RoundTripper. It can log slow queries, +// but all other logic is inside the RoundTripper. +type Handler struct { + cfg HandlerConfig + log log.Logger + roundTripper http.RoundTripper +} + +// New creates a new frontend handler. +func NewHandler(cfg HandlerConfig, roundTripper http.RoundTripper, log log.Logger) http.Handler { + return &Handler{ + cfg: cfg, + log: log, + roundTripper: roundTripper, + } +} + +func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer func() { + _ = r.Body.Close() + }() + + // Buffer the body for later use to track slow queries. 
+ var buf bytes.Buffer + r.Body = http.MaxBytesReader(w, r.Body, f.cfg.MaxBodySize) + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &buf)) + + startTime := time.Now() + resp, err := f.roundTripper.RoundTrip(r) + queryResponseTime := time.Since(startTime) + + if err != nil { + writeError(w, err) + return + } + + hs := w.Header() + for h, vs := range resp.Header { + hs[h] = vs + } + + w.WriteHeader(resp.StatusCode) + // we don't check for copy error as there is no much we can do at this point + _, _ = io.Copy(w, resp.Body) + + f.reportSlowQuery(queryResponseTime, r, buf) +} + +// reportSlowQuery reports slow queries if LogQueriesLongerThan is set to <0, where 0 disables logging +func (f *Handler) reportSlowQuery(queryResponseTime time.Duration, r *http.Request, bodyBuf bytes.Buffer) { + if f.cfg.LogQueriesLongerThan == 0 || queryResponseTime <= f.cfg.LogQueriesLongerThan { + return + } + + logMessage := []interface{}{ + "msg", "slow query detected", + "method", r.Method, + "host", r.Host, + "path", r.URL.Path, + "time_taken", queryResponseTime.String(), + } + + // use previously buffered body + r.Body = ioutil.NopCloser(&bodyBuf) + + // Ensure the form has been parsed so all the parameters are present + err := r.ParseForm() + if err != nil { + level.Warn(util.WithContext(r.Context(), f.log)).Log("msg", "unable to parse form for request", "err", err) + } + + // Attempt to iterate through the Form to log any filled in values + for k, v := range r.Form { + logMessage = append(logMessage, fmt.Sprintf("param_%s", k), strings.Join(v, ",")) + } + + level.Info(util.WithContext(r.Context(), f.log)).Log(logMessage...) + +} + +func writeError(w http.ResponseWriter, err error) { + switch err { + case context.Canceled: + err = errCanceled + case context.DeadlineExceeded: + err = errDeadlineExceeded + default: + if strings.Contains(err.Error(), "http: request body too large") { + err = errRequestEntityTooLarge + } + } + server.WriteError(w, err) +} + +// GrpcRoundTripper is similar to http.RoundTripper, but works with HTTP requests converted to protobuf messages. +type GrpcRoundTripper interface { + RoundTripGRPC(context.Context, *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) +} + +func AdaptGrpcRoundTripperToHTTPRoundTripper(r GrpcRoundTripper) http.RoundTripper { + return &grpcRoundTripperAdapter{roundTripper: r} +} + +// This adapter wraps GrpcRoundTripper and converted it into http.RoundTripper +type grpcRoundTripperAdapter struct { + roundTripper GrpcRoundTripper +} + +func (a *grpcRoundTripperAdapter) RoundTrip(r *http.Request) (*http.Response, error) { + req, err := server.HTTPRequest(r) + if err != nil { + return nil, err + } + + resp, err := a.roundTripper.RoundTripGRPC(r.Context(), req) + if err != nil { + return nil, err + } + + httpResp := &http.Response{ + StatusCode: int(resp.Code), + Body: ioutil.NopCloser(bytes.NewReader(resp.Body)), + Header: http.Header{}, + } + for _, h := range resp.Headers { + httpResp.Header[h.Key] = h.Values + } + return httpResp, nil +} diff --git a/pkg/querier/frontend/worker.go b/pkg/querier/frontend/worker.go index 48f133659cd..dc4b40a38c5 100644 --- a/pkg/querier/frontend/worker.go +++ b/pkg/querier/frontend/worker.go @@ -23,7 +23,7 @@ import ( // WorkerConfig is config for a worker. 
type WorkerConfig struct { - Address string `yaml:"frontend_address"` + FrontendAddress string `yaml:"frontend_address"` Parallelism int `yaml:"parallelism"` MatchMaxConcurrency bool `yaml:"match_max_concurrent"` DNSLookupDuration time.Duration `yaml:"dns_lookup_duration"` @@ -34,7 +34,7 @@ type WorkerConfig struct { // RegisterFlags adds the flags required to config this to the given FlagSet. func (cfg *WorkerConfig) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.Address, "querier.frontend-address", "", "Address of query frontend service, in host:port format.") + f.StringVar(&cfg.FrontendAddress, "querier.frontend-address", "", "Address of query frontend service, in host:port format. If -querier.scheduler-address is set as well, querier will use scheduler instead. If neither -querier.frontend-address or -querier.scheduler-address is set, queries must arrive via HTTP endpoint.") f.IntVar(&cfg.Parallelism, "querier.worker-parallelism", 10, "Number of simultaneous queries to process per query frontend.") f.BoolVar(&cfg.MatchMaxConcurrency, "querier.worker-match-max-concurrent", false, "Force worker concurrency to match the -querier.max-concurrent option. Overrides querier.worker-parallelism.") f.DurationVar(&cfg.DNSLookupDuration, "querier.dns-lookup-period", 10*time.Second, "How often to query DNS.") @@ -59,11 +59,10 @@ type worker struct { } // NewWorker creates a new worker and returns a service that is wrapping it. -// If no address is specified, it returns nil service (and no error). +// If no address is specified, it returns error. func NewWorker(cfg WorkerConfig, querierCfg querier.Config, server *server.Server, log log.Logger) (services.Service, error) { - if cfg.Address == "" { - level.Info(log).Log("msg", "no address specified, not starting worker") - return nil, nil + if cfg.FrontendAddress == "" { + return nil, errors.New("frontend address not configured") } if cfg.QuerierID == "" { @@ -79,7 +78,7 @@ func NewWorker(cfg WorkerConfig, querierCfg querier.Config, server *server.Serve return nil, err } - watcher, err := resolver.Resolve(cfg.Address) + watcher, err := resolver.Resolve(cfg.FrontendAddress) if err != nil { return nil, err } diff --git a/pkg/querier/frontend2/dns_watcher.go b/pkg/querier/frontend2/dns_watcher.go new file mode 100644 index 00000000000..5ca7820c248 --- /dev/null +++ b/pkg/querier/frontend2/dns_watcher.go @@ -0,0 +1,80 @@ +package frontend2 + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + "google.golang.org/grpc/naming" + + "github.com/cortexproject/cortex/pkg/util/services" +) + +// Notifications about address resolution. All notifications are sent on the same goroutine. +type DNSNotifications interface { + AddressAdded(address string) + + AddressRemoved(address string) +} + +type dnsWatcher struct { + watcher naming.Watcher //nolint:staticcheck //Skipping for now. If you still see this more than likely issue https://github.com/cortexproject/cortex/issues/2015 has not yet been addressed. + notifications DNSNotifications +} + +// NewDNSWatcher creates a new DNS watcher and returns a service that is wrapping it. 
+func NewDNSWatcher(address string, dnsLookupPeriod time.Duration, notifications DNSNotifications) (services.Service, error) {
+	resolver, err := naming.NewDNSResolverWithFreq(dnsLookupPeriod)
+	if err != nil {
+		return nil, err
+	}
+
+	watcher, err := resolver.Resolve(address)
+	if err != nil {
+		return nil, err
+	}
+
+	w := &dnsWatcher{
+		watcher:       watcher,
+		notifications: notifications,
+	}
+	return services.NewBasicService(nil, w.watchDNSLoop, nil), nil
+}
+
+// watchDNSLoop watches for changes in DNS and sends notifications.
+func (w *dnsWatcher) watchDNSLoop(servCtx context.Context) error {
+	go func() {
+		// Close the watcher when this service is asked to stop.
+		// Closing the watcher makes watchDNSLoop exit, since it only iterates on watcher updates, and has no other
+		// way to stop. We cannot close the watcher in the `stopping` method, because it is only called *after*
+		// watchDNSLoop exits.
+		<-servCtx.Done()
+		w.watcher.Close()
+	}()
+
+	for {
+		updates, err := w.watcher.Next()
+		if err != nil {
+			// watcher.Next returns an error when Close is called, but we call Close when our context is done.
+			// We don't want to report an error in that case.
+			if servCtx.Err() != nil {
+				return nil
+			}
+			return errors.Wrapf(err, "error from DNS watcher")
+		}
+
+		for _, update := range updates {
+			switch update.Op {
+			case naming.Add:
+				w.notifications.AddressAdded(update.Addr)
+
+			case naming.Delete:
+				w.notifications.AddressRemoved(update.Addr)
+
+			default:
+				return fmt.Errorf("unknown op: %v", update.Op)
+			}
+		}
+	}
+}
diff --git a/pkg/querier/frontend2/frontend.pb.go b/pkg/querier/frontend2/frontend.pb.go
new file mode 100644
index 00000000000..038207c6430
--- /dev/null
+++ b/pkg/querier/frontend2/frontend.pb.go
@@ -0,0 +1,2309 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: frontend.proto
+
+package frontend2
+
+import (
+	context "context"
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	httpgrpc "github.com/weaveworks/common/httpgrpc"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strconv "strconv"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FrontendToSchedulerType int32 + +const ( + INIT FrontendToSchedulerType = 0 + ENQUEUE FrontendToSchedulerType = 1 + CANCEL FrontendToSchedulerType = 2 +) + +var FrontendToSchedulerType_name = map[int32]string{ + 0: "INIT", + 1: "ENQUEUE", + 2: "CANCEL", +} + +var FrontendToSchedulerType_value = map[string]int32{ + "INIT": 0, + "ENQUEUE": 1, + "CANCEL": 2, +} + +func (FrontendToSchedulerType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eca3873955a29cfe, []int{0} +} + +type SchedulerToFrontendStatus int32 + +const ( + OK SchedulerToFrontendStatus = 0 + TOO_MANY_REQUESTS_PER_TENANT SchedulerToFrontendStatus = 1 + ERROR SchedulerToFrontendStatus = 2 + SHUTTING_DOWN SchedulerToFrontendStatus = 3 +) + +var SchedulerToFrontendStatus_name = map[int32]string{ + 0: "OK", + 1: "TOO_MANY_REQUESTS_PER_TENANT", + 2: "ERROR", + 3: "SHUTTING_DOWN", +} + +var SchedulerToFrontendStatus_value = map[string]int32{ + "OK": 0, + "TOO_MANY_REQUESTS_PER_TENANT": 1, + "ERROR": 2, + "SHUTTING_DOWN": 3, +} + +func (SchedulerToFrontendStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eca3873955a29cfe, []int{1} +} + +// Querier reports its own clientID when it connects, so that scheduler knows how many *different* queriers are connected. +// To signal that querier is ready to accept another request, querier sends empty message. +type QuerierToScheduler struct { + QuerierID string `protobuf:"bytes,1,opt,name=querierID,proto3" json:"querierID,omitempty"` +} + +func (m *QuerierToScheduler) Reset() { *m = QuerierToScheduler{} } +func (*QuerierToScheduler) ProtoMessage() {} +func (*QuerierToScheduler) Descriptor() ([]byte, []int) { + return fileDescriptor_eca3873955a29cfe, []int{0} +} +func (m *QuerierToScheduler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuerierToScheduler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QuerierToScheduler.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QuerierToScheduler) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuerierToScheduler.Merge(m, src) +} +func (m *QuerierToScheduler) XXX_Size() int { + return m.Size() +} +func (m *QuerierToScheduler) XXX_DiscardUnknown() { + xxx_messageInfo_QuerierToScheduler.DiscardUnknown(m) +} + +var xxx_messageInfo_QuerierToScheduler proto.InternalMessageInfo + +func (m *QuerierToScheduler) GetQuerierID() string { + if m != nil { + return m.QuerierID + } + return "" +} + +type SchedulerToQuerier struct { + QueryID uint64 `protobuf:"varint,1,opt,name=queryID,proto3" json:"queryID,omitempty"` + HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,2,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` + // Where should querier send HTTP Response to (using FrontendForQuerier interface). + FrontendAddress string `protobuf:"bytes,3,opt,name=frontendAddress,proto3" json:"frontendAddress,omitempty"` + // User who initiated the request. Needed to send reply back to frontend. 
+ UserID string `protobuf:"bytes,4,opt,name=userID,proto3" json:"userID,omitempty"` +} + +func (m *SchedulerToQuerier) Reset() { *m = SchedulerToQuerier{} } +func (*SchedulerToQuerier) ProtoMessage() {} +func (*SchedulerToQuerier) Descriptor() ([]byte, []int) { + return fileDescriptor_eca3873955a29cfe, []int{1} +} +func (m *SchedulerToQuerier) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SchedulerToQuerier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SchedulerToQuerier.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SchedulerToQuerier) XXX_Merge(src proto.Message) { + xxx_messageInfo_SchedulerToQuerier.Merge(m, src) +} +func (m *SchedulerToQuerier) XXX_Size() int { + return m.Size() +} +func (m *SchedulerToQuerier) XXX_DiscardUnknown() { + xxx_messageInfo_SchedulerToQuerier.DiscardUnknown(m) +} + +var xxx_messageInfo_SchedulerToQuerier proto.InternalMessageInfo + +func (m *SchedulerToQuerier) GetQueryID() uint64 { + if m != nil { + return m.QueryID + } + return 0 +} + +func (m *SchedulerToQuerier) GetHttpRequest() *httpgrpc.HTTPRequest { + if m != nil { + return m.HttpRequest + } + return nil +} + +func (m *SchedulerToQuerier) GetFrontendAddress() string { + if m != nil { + return m.FrontendAddress + } + return "" +} + +func (m *SchedulerToQuerier) GetUserID() string { + if m != nil { + return m.UserID + } + return "" +} + +type QueryResultRequest struct { + QueryID uint64 `protobuf:"varint,1,opt,name=queryID,proto3" json:"queryID,omitempty"` + HttpResponse *httpgrpc.HTTPResponse `protobuf:"bytes,2,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"` +} + +func (m *QueryResultRequest) Reset() { *m = QueryResultRequest{} } +func (*QueryResultRequest) ProtoMessage() {} +func (*QueryResultRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_eca3873955a29cfe, []int{2} +} +func (m *QueryResultRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryResultRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryResultRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryResultRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResultRequest.Merge(m, src) +} +func (m *QueryResultRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryResultRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResultRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResultRequest proto.InternalMessageInfo + +func (m *QueryResultRequest) GetQueryID() uint64 { + if m != nil { + return m.QueryID + } + return 0 +} + +func (m *QueryResultRequest) GetHttpResponse() *httpgrpc.HTTPResponse { + if m != nil { + return m.HttpResponse + } + return nil +} + +type QueryResultResponse struct { +} + +func (m *QueryResultResponse) Reset() { *m = QueryResultResponse{} } +func (*QueryResultResponse) ProtoMessage() {} +func (*QueryResultResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eca3873955a29cfe, []int{3} +} +func (m *QueryResultResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryResultResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_QueryResultResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryResultResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResultResponse.Merge(m, src) +} +func (m *QueryResultResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryResultResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResultResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResultResponse proto.InternalMessageInfo + +type FrontendToScheduler struct { + Type FrontendToSchedulerType `protobuf:"varint,1,opt,name=type,proto3,enum=frontend2.FrontendToSchedulerType" json:"type,omitempty"` + // Used by INIT message. Will be put into all requests passed to querier. + FrontendAddress string `protobuf:"bytes,2,opt,name=frontendAddress,proto3" json:"frontendAddress,omitempty"` + // Used by ENQUEUE and CANCEL. Each enqueued query must have queryID higher than previous one. + QueryID uint64 `protobuf:"varint,3,opt,name=queryID,proto3" json:"queryID,omitempty"` + // Following are used by ENQUEUE only. + UserID string `protobuf:"bytes,4,opt,name=userID,proto3" json:"userID,omitempty"` + HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,5,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` +} + +func (m *FrontendToScheduler) Reset() { *m = FrontendToScheduler{} } +func (*FrontendToScheduler) ProtoMessage() {} +func (*FrontendToScheduler) Descriptor() ([]byte, []int) { + return fileDescriptor_eca3873955a29cfe, []int{4} +} +func (m *FrontendToScheduler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FrontendToScheduler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FrontendToScheduler.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FrontendToScheduler) XXX_Merge(src proto.Message) { + xxx_messageInfo_FrontendToScheduler.Merge(m, src) +} +func (m *FrontendToScheduler) XXX_Size() int { + return m.Size() +} +func (m *FrontendToScheduler) XXX_DiscardUnknown() { + xxx_messageInfo_FrontendToScheduler.DiscardUnknown(m) +} + +var xxx_messageInfo_FrontendToScheduler proto.InternalMessageInfo + +func (m *FrontendToScheduler) GetType() FrontendToSchedulerType { + if m != nil { + return m.Type + } + return INIT +} + +func (m *FrontendToScheduler) GetFrontendAddress() string { + if m != nil { + return m.FrontendAddress + } + return "" +} + +func (m *FrontendToScheduler) GetQueryID() uint64 { + if m != nil { + return m.QueryID + } + return 0 +} + +func (m *FrontendToScheduler) GetUserID() string { + if m != nil { + return m.UserID + } + return "" +} + +func (m *FrontendToScheduler) GetHttpRequest() *httpgrpc.HTTPRequest { + if m != nil { + return m.HttpRequest + } + return nil +} + +type SchedulerToFrontend struct { + Status SchedulerToFrontendStatus `protobuf:"varint,1,opt,name=status,proto3,enum=frontend2.SchedulerToFrontendStatus" json:"status,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *SchedulerToFrontend) Reset() { *m = SchedulerToFrontend{} } +func (*SchedulerToFrontend) ProtoMessage() {} +func (*SchedulerToFrontend) Descriptor() ([]byte, []int) { + return fileDescriptor_eca3873955a29cfe, []int{5} +} +func (m *SchedulerToFrontend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*SchedulerToFrontend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SchedulerToFrontend.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SchedulerToFrontend) XXX_Merge(src proto.Message) { + xxx_messageInfo_SchedulerToFrontend.Merge(m, src) +} +func (m *SchedulerToFrontend) XXX_Size() int { + return m.Size() +} +func (m *SchedulerToFrontend) XXX_DiscardUnknown() { + xxx_messageInfo_SchedulerToFrontend.DiscardUnknown(m) +} + +var xxx_messageInfo_SchedulerToFrontend proto.InternalMessageInfo + +func (m *SchedulerToFrontend) GetStatus() SchedulerToFrontendStatus { + if m != nil { + return m.Status + } + return OK +} + +func (m *SchedulerToFrontend) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func init() { + proto.RegisterEnum("frontend2.FrontendToSchedulerType", FrontendToSchedulerType_name, FrontendToSchedulerType_value) + proto.RegisterEnum("frontend2.SchedulerToFrontendStatus", SchedulerToFrontendStatus_name, SchedulerToFrontendStatus_value) + proto.RegisterType((*QuerierToScheduler)(nil), "frontend2.QuerierToScheduler") + proto.RegisterType((*SchedulerToQuerier)(nil), "frontend2.SchedulerToQuerier") + proto.RegisterType((*QueryResultRequest)(nil), "frontend2.QueryResultRequest") + proto.RegisterType((*QueryResultResponse)(nil), "frontend2.QueryResultResponse") + proto.RegisterType((*FrontendToScheduler)(nil), "frontend2.FrontendToScheduler") + proto.RegisterType((*SchedulerToFrontend)(nil), "frontend2.SchedulerToFrontend") +} + +func init() { proto.RegisterFile("frontend.proto", fileDescriptor_eca3873955a29cfe) } + +var fileDescriptor_eca3873955a29cfe = []byte{ + // 632 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdf, 0x4e, 0x13, 0x4f, + 0x14, 0xde, 0x29, 0xa5, 0xfc, 0x7a, 0xca, 0x0f, 0xd7, 0xe1, 0x8f, 0xb5, 0x81, 0x09, 0xd9, 0x78, + 0xd1, 0x90, 0xd8, 0x9a, 0xd5, 0x68, 0x62, 0x48, 0x4c, 0x85, 0x45, 0x1a, 0x71, 0x4b, 0xa7, 0xd3, + 0x18, 0xbd, 0x69, 0xa0, 0x1d, 0x5a, 0x14, 0x3a, 0xcb, 0xfe, 0x91, 0xf4, 0xce, 0x47, 0xf0, 0x31, + 0xf4, 0x4d, 0xbc, 0xe4, 0x92, 0x4b, 0x59, 0x6e, 0xbc, 0xe4, 0x11, 0x0c, 0xb3, 0xb3, 0x65, 0x0b, + 0xad, 0xf1, 0x6e, 0xce, 0x99, 0xef, 0xec, 0xf9, 0xbe, 0xef, 0x9c, 0x59, 0x98, 0x3b, 0x70, 0x45, + 0xdf, 0xe7, 0xfd, 0x4e, 0xc9, 0x71, 0x85, 0x2f, 0x70, 0x36, 0x8e, 0xcd, 0xc2, 0xe3, 0xee, 0xa1, + 0xdf, 0x0b, 0xf6, 0x4b, 0x6d, 0x71, 0x5c, 0xee, 0x8a, 0xae, 0x28, 0x4b, 0xc4, 0x7e, 0x70, 0x20, + 0x23, 0x19, 0xc8, 0x53, 0x54, 0x59, 0x78, 0x96, 0x80, 0x9f, 0xf2, 0xbd, 0x2f, 0xfc, 0x54, 0xb8, + 0x9f, 0xbd, 0x72, 0x5b, 0x1c, 0x1f, 0x8b, 0x7e, 0xb9, 0xe7, 0xfb, 0x4e, 0xd7, 0x75, 0xda, 0xc3, + 0x43, 0x54, 0x65, 0x98, 0x80, 0xeb, 0x01, 0x77, 0x0f, 0xb9, 0xcb, 0x44, 0xa3, 0xdd, 0xe3, 0x9d, + 0xe0, 0x88, 0xbb, 0x78, 0x19, 0xb2, 0x27, 0x51, 0xb6, 0xba, 0x99, 0x47, 0xab, 0xa8, 0x98, 0xa5, + 0x37, 0x09, 0xe3, 0x07, 0x02, 0x3c, 0xc4, 0x32, 0xa1, 0xea, 0x71, 0x1e, 0x66, 0xae, 0x31, 0x03, + 0x55, 0x92, 0xa6, 0x71, 0x88, 0x5f, 0x40, 0xee, 0xba, 0x2d, 0xe5, 0x27, 0x01, 0xf7, 0xfc, 0x7c, + 0x6a, 0x15, 0x15, 0x73, 0xe6, 0x62, 0x69, 0x48, 0x65, 0x9b, 0xb1, 0x5d, 0x75, 0x49, 0x93, 0x48, + 0x5c, 0x84, 0x7b, 0xb1, 0x1f, 0x95, 0x4e, 0xc7, 0xe5, 0x9e, 0x97, 0x9f, 0x92, 0x6c, 0x6e, 0xa7, + 0xf1, 0x12, 0x64, 0x02, 0x4f, 0xd2, 0x4d, 0x4b, 0x80, 0x8a, 0x8c, 0x4f, 0x91, 0xbe, 0x01, 0xe5, + 0x5e, 0x70, 0xe4, 0xc7, 0xdf, 0x9d, 
0x4c, 0xf5, 0x25, 0xcc, 0x46, 0x04, 0x3c, 0x47, 0xf4, 0x3d, + 0xae, 0xb8, 0x2e, 0xdd, 0xe6, 0x1a, 0xdd, 0xd2, 0x11, 0xac, 0xb1, 0x08, 0xf3, 0x23, 0xbd, 0x54, + 0x3a, 0x44, 0x30, 0xbf, 0xa5, 0xe8, 0x26, 0x4d, 0x7e, 0x0e, 0x69, 0x7f, 0xe0, 0x70, 0xc9, 0x60, + 0xce, 0x34, 0x4a, 0xc3, 0xc9, 0x97, 0xc6, 0xa0, 0xd9, 0xc0, 0xe1, 0x54, 0xe2, 0xc7, 0x99, 0x92, + 0x1a, 0x6f, 0x4a, 0x42, 0xe6, 0xd4, 0xa8, 0xcc, 0x09, 0x76, 0xdd, 0x9e, 0xd4, 0xf4, 0xbf, 0x4e, + 0xca, 0x38, 0x84, 0xf9, 0xc4, 0x4a, 0xc4, 0x02, 0xf0, 0x3a, 0x64, 0x3c, 0x7f, 0xcf, 0x0f, 0x3c, + 0xa5, 0xf2, 0x51, 0x42, 0xe5, 0x18, 0x7c, 0x43, 0x62, 0xa9, 0xaa, 0xc1, 0x0b, 0x30, 0xcd, 0x5d, + 0x57, 0xb8, 0x4a, 0x5f, 0x14, 0xac, 0xad, 0xc3, 0x83, 0x09, 0x06, 0xe1, 0xff, 0x20, 0x5d, 0xb5, + 0xab, 0x4c, 0xd7, 0x70, 0x0e, 0x66, 0x2c, 0xbb, 0xde, 0xb4, 0x9a, 0x96, 0x8e, 0x30, 0x40, 0x66, + 0xa3, 0x62, 0x6f, 0x58, 0x3b, 0x7a, 0x6a, 0xad, 0x0d, 0x0f, 0x27, 0x36, 0xc6, 0x19, 0x48, 0xd5, + 0xde, 0xea, 0x1a, 0x5e, 0x85, 0x65, 0x56, 0xab, 0xb5, 0xde, 0x55, 0xec, 0x0f, 0x2d, 0x6a, 0xd5, + 0x9b, 0x56, 0x83, 0x35, 0x5a, 0xbb, 0x16, 0x6d, 0x31, 0xcb, 0xae, 0xd8, 0x4c, 0x47, 0x38, 0x0b, + 0xd3, 0x16, 0xa5, 0x35, 0xaa, 0xa7, 0xf0, 0x7d, 0xf8, 0xbf, 0xb1, 0xdd, 0x64, 0xac, 0x6a, 0xbf, + 0x69, 0x6d, 0xd6, 0xde, 0xdb, 0xfa, 0x94, 0xd9, 0x4b, 0xb8, 0xb1, 0x25, 0xdc, 0xf8, 0x85, 0xd4, + 0x21, 0xa7, 0x8e, 0x3b, 0x42, 0x38, 0x78, 0x25, 0x61, 0xc6, 0xdd, 0x47, 0x58, 0x58, 0x19, 0xef, + 0x95, 0x42, 0x1a, 0x5a, 0x11, 0x3d, 0x41, 0x66, 0x07, 0x70, 0xac, 0x21, 0xd1, 0xc8, 0x8e, 0x1a, + 0xa9, 0x4d, 0xbc, 0xd3, 0x68, 0xf4, 0x35, 0x14, 0xc8, 0xa4, 0x6b, 0xb5, 0xc0, 0x9a, 0x79, 0x04, + 0x0b, 0x49, 0x3d, 0xc3, 0xf1, 0x32, 0x98, 0x8d, 0xcf, 0x52, 0x11, 0xf9, 0xfb, 0x12, 0x8f, 0x74, + 0x1a, 0x33, 0x85, 0x48, 0xd3, 0xeb, 0x57, 0x67, 0x17, 0x44, 0x3b, 0xbf, 0x20, 0xda, 0xd5, 0x05, + 0x41, 0x5f, 0x43, 0x82, 0xbe, 0x87, 0x04, 0xfd, 0x0c, 0x09, 0x3a, 0x0b, 0x09, 0xfa, 0x15, 0x12, + 0xf4, 0x3b, 0x24, 0xda, 0x55, 0x48, 0xd0, 0xb7, 0x4b, 0xa2, 0x9d, 0x5d, 0x12, 0xed, 0xfc, 0x92, + 0x68, 0x1f, 0x6f, 0xfe, 0x9c, 0xfb, 0x19, 0xf9, 0x6f, 0x7b, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, + 0x26, 0x42, 0xa3, 0x4b, 0x5d, 0x05, 0x00, 0x00, +} + +func (x FrontendToSchedulerType) String() string { + s, ok := FrontendToSchedulerType_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x SchedulerToFrontendStatus) String() string { + s, ok := SchedulerToFrontendStatus_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *QuerierToScheduler) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QuerierToScheduler) + if !ok { + that2, ok := that.(QuerierToScheduler) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.QuerierID != that1.QuerierID { + return false + } + return true +} +func (this *SchedulerToQuerier) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SchedulerToQuerier) + if !ok { + that2, ok := that.(SchedulerToQuerier) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.QueryID != that1.QueryID { + return false + } + if !this.HttpRequest.Equal(that1.HttpRequest) { + return false + } + if this.FrontendAddress != that1.FrontendAddress { + return false + } + if this.UserID != that1.UserID { + return false + } + return true +} +func 
(this *QueryResultRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryResultRequest) + if !ok { + that2, ok := that.(QueryResultRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.QueryID != that1.QueryID { + return false + } + if !this.HttpResponse.Equal(that1.HttpResponse) { + return false + } + return true +} +func (this *QueryResultResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryResultResponse) + if !ok { + that2, ok := that.(QueryResultResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *FrontendToScheduler) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FrontendToScheduler) + if !ok { + that2, ok := that.(FrontendToScheduler) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.FrontendAddress != that1.FrontendAddress { + return false + } + if this.QueryID != that1.QueryID { + return false + } + if this.UserID != that1.UserID { + return false + } + if !this.HttpRequest.Equal(that1.HttpRequest) { + return false + } + return true +} +func (this *SchedulerToFrontend) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SchedulerToFrontend) + if !ok { + that2, ok := that.(SchedulerToFrontend) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Status != that1.Status { + return false + } + if this.Error != that1.Error { + return false + } + return true +} +func (this *QuerierToScheduler) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&frontend2.QuerierToScheduler{") + s = append(s, "QuerierID: "+fmt.Sprintf("%#v", this.QuerierID)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SchedulerToQuerier) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&frontend2.SchedulerToQuerier{") + s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") + if this.HttpRequest != nil { + s = append(s, "HttpRequest: "+fmt.Sprintf("%#v", this.HttpRequest)+",\n") + } + s = append(s, "FrontendAddress: "+fmt.Sprintf("%#v", this.FrontendAddress)+",\n") + s = append(s, "UserID: "+fmt.Sprintf("%#v", this.UserID)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryResultRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&frontend2.QueryResultRequest{") + s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") + if this.HttpResponse != nil { + s = append(s, "HttpResponse: "+fmt.Sprintf("%#v", this.HttpResponse)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryResultResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&frontend2.QueryResultResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FrontendToScheduler) GoString() string { + if this == nil { 
+ return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&frontend2.FrontendToScheduler{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "FrontendAddress: "+fmt.Sprintf("%#v", this.FrontendAddress)+",\n") + s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") + s = append(s, "UserID: "+fmt.Sprintf("%#v", this.UserID)+",\n") + if this.HttpRequest != nil { + s = append(s, "HttpRequest: "+fmt.Sprintf("%#v", this.HttpRequest)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SchedulerToFrontend) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&frontend2.SchedulerToFrontend{") + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFrontend(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// SchedulerForQuerierClient is the client API for SchedulerForQuerier service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SchedulerForQuerierClient interface { + // After calling this method, both Querier and Scheduler enters a loop, in which querier waits for + // a "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, + // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. + // + // Long-running loop is used to detect broken connection between scheduler and querier. This is important + // for scheduler to keep a list of connected queriers up-to-date. + QuerierLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForQuerier_QuerierLoopClient, error) +} + +type schedulerForQuerierClient struct { + cc *grpc.ClientConn +} + +func NewSchedulerForQuerierClient(cc *grpc.ClientConn) SchedulerForQuerierClient { + return &schedulerForQuerierClient{cc} +} + +func (c *schedulerForQuerierClient) QuerierLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForQuerier_QuerierLoopClient, error) { + stream, err := c.cc.NewStream(ctx, &_SchedulerForQuerier_serviceDesc.Streams[0], "/frontend2.SchedulerForQuerier/QuerierLoop", opts...) 
+ if err != nil { + return nil, err + } + x := &schedulerForQuerierQuerierLoopClient{stream} + return x, nil +} + +type SchedulerForQuerier_QuerierLoopClient interface { + Send(*QuerierToScheduler) error + Recv() (*SchedulerToQuerier, error) + grpc.ClientStream +} + +type schedulerForQuerierQuerierLoopClient struct { + grpc.ClientStream +} + +func (x *schedulerForQuerierQuerierLoopClient) Send(m *QuerierToScheduler) error { + return x.ClientStream.SendMsg(m) +} + +func (x *schedulerForQuerierQuerierLoopClient) Recv() (*SchedulerToQuerier, error) { + m := new(SchedulerToQuerier) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// SchedulerForQuerierServer is the server API for SchedulerForQuerier service. +type SchedulerForQuerierServer interface { + // After calling this method, both Querier and Scheduler enters a loop, in which querier waits for + // a "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, + // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. + // + // Long-running loop is used to detect broken connection between scheduler and querier. This is important + // for scheduler to keep a list of connected queriers up-to-date. + QuerierLoop(SchedulerForQuerier_QuerierLoopServer) error +} + +// UnimplementedSchedulerForQuerierServer can be embedded to have forward compatible implementations. +type UnimplementedSchedulerForQuerierServer struct { +} + +func (*UnimplementedSchedulerForQuerierServer) QuerierLoop(srv SchedulerForQuerier_QuerierLoopServer) error { + return status.Errorf(codes.Unimplemented, "method QuerierLoop not implemented") +} + +func RegisterSchedulerForQuerierServer(s *grpc.Server, srv SchedulerForQuerierServer) { + s.RegisterService(&_SchedulerForQuerier_serviceDesc, srv) +} + +func _SchedulerForQuerier_QuerierLoop_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SchedulerForQuerierServer).QuerierLoop(&schedulerForQuerierQuerierLoopServer{stream}) +} + +type SchedulerForQuerier_QuerierLoopServer interface { + Send(*SchedulerToQuerier) error + Recv() (*QuerierToScheduler, error) + grpc.ServerStream +} + +type schedulerForQuerierQuerierLoopServer struct { + grpc.ServerStream +} + +func (x *schedulerForQuerierQuerierLoopServer) Send(m *SchedulerToQuerier) error { + return x.ServerStream.SendMsg(m) +} + +func (x *schedulerForQuerierQuerierLoopServer) Recv() (*QuerierToScheduler, error) { + m := new(QuerierToScheduler) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _SchedulerForQuerier_serviceDesc = grpc.ServiceDesc{ + ServiceName: "frontend2.SchedulerForQuerier", + HandlerType: (*SchedulerForQuerierServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "QuerierLoop", + Handler: _SchedulerForQuerier_QuerierLoop_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "frontend.proto", +} + +// FrontendForQuerierClient is the client API for FrontendForQuerier service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type FrontendForQuerierClient interface { + QueryResult(ctx context.Context, in *QueryResultRequest, opts ...grpc.CallOption) (*QueryResultResponse, error) +} + +type frontendForQuerierClient struct { + cc *grpc.ClientConn +} + +func NewFrontendForQuerierClient(cc *grpc.ClientConn) FrontendForQuerierClient { + return &frontendForQuerierClient{cc} +} + +func (c *frontendForQuerierClient) QueryResult(ctx context.Context, in *QueryResultRequest, opts ...grpc.CallOption) (*QueryResultResponse, error) { + out := new(QueryResultResponse) + err := c.cc.Invoke(ctx, "/frontend2.FrontendForQuerier/QueryResult", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FrontendForQuerierServer is the server API for FrontendForQuerier service. +type FrontendForQuerierServer interface { + QueryResult(context.Context, *QueryResultRequest) (*QueryResultResponse, error) +} + +// UnimplementedFrontendForQuerierServer can be embedded to have forward compatible implementations. +type UnimplementedFrontendForQuerierServer struct { +} + +func (*UnimplementedFrontendForQuerierServer) QueryResult(ctx context.Context, req *QueryResultRequest) (*QueryResultResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method QueryResult not implemented") +} + +func RegisterFrontendForQuerierServer(s *grpc.Server, srv FrontendForQuerierServer) { + s.RegisterService(&_FrontendForQuerier_serviceDesc, srv) +} + +func _FrontendForQuerier_QueryResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryResultRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FrontendForQuerierServer).QueryResult(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/frontend2.FrontendForQuerier/QueryResult", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FrontendForQuerierServer).QueryResult(ctx, req.(*QueryResultRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _FrontendForQuerier_serviceDesc = grpc.ServiceDesc{ + ServiceName: "frontend2.FrontendForQuerier", + HandlerType: (*FrontendForQuerierServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "QueryResult", + Handler: _FrontendForQuerier_QueryResult_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "frontend.proto", +} + +// SchedulerForFrontendClient is the client API for SchedulerForFrontend service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SchedulerForFrontendClient interface { + // After calling this method, both Frontend and Scheduler enter a loop. Frontend will keep sending ENQUEUE and + // CANCEL requests, and scheduler is expected to process them. Scheduler returns one response for each request. + // + // Long-running loop is used to detect broken connection between frontend and scheduler. This is important for both + // parties... if connection breaks, frontend can cancel (and possibly retry on different scheduler) all pending + // requests sent to this scheduler, while scheduler can cancel queued requests from given frontend. 
+ FrontendLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForFrontend_FrontendLoopClient, error) +} + +type schedulerForFrontendClient struct { + cc *grpc.ClientConn +} + +func NewSchedulerForFrontendClient(cc *grpc.ClientConn) SchedulerForFrontendClient { + return &schedulerForFrontendClient{cc} +} + +func (c *schedulerForFrontendClient) FrontendLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForFrontend_FrontendLoopClient, error) { + stream, err := c.cc.NewStream(ctx, &_SchedulerForFrontend_serviceDesc.Streams[0], "/frontend2.SchedulerForFrontend/FrontendLoop", opts...) + if err != nil { + return nil, err + } + x := &schedulerForFrontendFrontendLoopClient{stream} + return x, nil +} + +type SchedulerForFrontend_FrontendLoopClient interface { + Send(*FrontendToScheduler) error + Recv() (*SchedulerToFrontend, error) + grpc.ClientStream +} + +type schedulerForFrontendFrontendLoopClient struct { + grpc.ClientStream +} + +func (x *schedulerForFrontendFrontendLoopClient) Send(m *FrontendToScheduler) error { + return x.ClientStream.SendMsg(m) +} + +func (x *schedulerForFrontendFrontendLoopClient) Recv() (*SchedulerToFrontend, error) { + m := new(SchedulerToFrontend) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// SchedulerForFrontendServer is the server API for SchedulerForFrontend service. +type SchedulerForFrontendServer interface { + // After calling this method, both Frontend and Scheduler enter a loop. Frontend will keep sending ENQUEUE and + // CANCEL requests, and scheduler is expected to process them. Scheduler returns one response for each request. + // + // Long-running loop is used to detect broken connection between frontend and scheduler. This is important for both + // parties... if connection breaks, frontend can cancel (and possibly retry on different scheduler) all pending + // requests sent to this scheduler, while scheduler can cancel queued requests from given frontend. + FrontendLoop(SchedulerForFrontend_FrontendLoopServer) error +} + +// UnimplementedSchedulerForFrontendServer can be embedded to have forward compatible implementations. 
+type UnimplementedSchedulerForFrontendServer struct { +} + +func (*UnimplementedSchedulerForFrontendServer) FrontendLoop(srv SchedulerForFrontend_FrontendLoopServer) error { + return status.Errorf(codes.Unimplemented, "method FrontendLoop not implemented") +} + +func RegisterSchedulerForFrontendServer(s *grpc.Server, srv SchedulerForFrontendServer) { + s.RegisterService(&_SchedulerForFrontend_serviceDesc, srv) +} + +func _SchedulerForFrontend_FrontendLoop_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SchedulerForFrontendServer).FrontendLoop(&schedulerForFrontendFrontendLoopServer{stream}) +} + +type SchedulerForFrontend_FrontendLoopServer interface { + Send(*SchedulerToFrontend) error + Recv() (*FrontendToScheduler, error) + grpc.ServerStream +} + +type schedulerForFrontendFrontendLoopServer struct { + grpc.ServerStream +} + +func (x *schedulerForFrontendFrontendLoopServer) Send(m *SchedulerToFrontend) error { + return x.ServerStream.SendMsg(m) +} + +func (x *schedulerForFrontendFrontendLoopServer) Recv() (*FrontendToScheduler, error) { + m := new(FrontendToScheduler) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _SchedulerForFrontend_serviceDesc = grpc.ServiceDesc{ + ServiceName: "frontend2.SchedulerForFrontend", + HandlerType: (*SchedulerForFrontendServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "FrontendLoop", + Handler: _SchedulerForFrontend_FrontendLoop_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "frontend.proto", +} + +func (m *QuerierToScheduler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuerierToScheduler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuerierToScheduler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.QuerierID) > 0 { + i -= len(m.QuerierID) + copy(dAtA[i:], m.QuerierID) + i = encodeVarintFrontend(dAtA, i, uint64(len(m.QuerierID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SchedulerToQuerier) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchedulerToQuerier) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SchedulerToQuerier) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.UserID) > 0 { + i -= len(m.UserID) + copy(dAtA[i:], m.UserID) + i = encodeVarintFrontend(dAtA, i, uint64(len(m.UserID))) + i-- + dAtA[i] = 0x22 + } + if len(m.FrontendAddress) > 0 { + i -= len(m.FrontendAddress) + copy(dAtA[i:], m.FrontendAddress) + i = encodeVarintFrontend(dAtA, i, uint64(len(m.FrontendAddress))) + i-- + dAtA[i] = 0x1a + } + if m.HttpRequest != nil { + { + size, err := m.HttpRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFrontend(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.QueryID != 0 { + i = encodeVarintFrontend(dAtA, i, uint64(m.QueryID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryResultRequest) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResultRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryResultRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HttpResponse != nil { + { + size, err := m.HttpResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFrontend(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.QueryID != 0 { + i = encodeVarintFrontend(dAtA, i, uint64(m.QueryID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryResultResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResultResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryResultResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *FrontendToScheduler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FrontendToScheduler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FrontendToScheduler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HttpRequest != nil { + { + size, err := m.HttpRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFrontend(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.UserID) > 0 { + i -= len(m.UserID) + copy(dAtA[i:], m.UserID) + i = encodeVarintFrontend(dAtA, i, uint64(len(m.UserID))) + i-- + dAtA[i] = 0x22 + } + if m.QueryID != 0 { + i = encodeVarintFrontend(dAtA, i, uint64(m.QueryID)) + i-- + dAtA[i] = 0x18 + } + if len(m.FrontendAddress) > 0 { + i -= len(m.FrontendAddress) + copy(dAtA[i:], m.FrontendAddress) + i = encodeVarintFrontend(dAtA, i, uint64(len(m.FrontendAddress))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintFrontend(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SchedulerToFrontend) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchedulerToFrontend) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SchedulerToFrontend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintFrontend(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x12 + } + if m.Status != 0 { + i = encodeVarintFrontend(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintFrontend(dAtA []byte, offset int, v uint64) int { + offset -= sovFrontend(v) + base := offset + for v >= 1<<7 { + 
dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QuerierToScheduler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.QuerierID) + if l > 0 { + n += 1 + l + sovFrontend(uint64(l)) + } + return n +} + +func (m *SchedulerToQuerier) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryID != 0 { + n += 1 + sovFrontend(uint64(m.QueryID)) + } + if m.HttpRequest != nil { + l = m.HttpRequest.Size() + n += 1 + l + sovFrontend(uint64(l)) + } + l = len(m.FrontendAddress) + if l > 0 { + n += 1 + l + sovFrontend(uint64(l)) + } + l = len(m.UserID) + if l > 0 { + n += 1 + l + sovFrontend(uint64(l)) + } + return n +} + +func (m *QueryResultRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryID != 0 { + n += 1 + sovFrontend(uint64(m.QueryID)) + } + if m.HttpResponse != nil { + l = m.HttpResponse.Size() + n += 1 + l + sovFrontend(uint64(l)) + } + return n +} + +func (m *QueryResultResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *FrontendToScheduler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovFrontend(uint64(m.Type)) + } + l = len(m.FrontendAddress) + if l > 0 { + n += 1 + l + sovFrontend(uint64(l)) + } + if m.QueryID != 0 { + n += 1 + sovFrontend(uint64(m.QueryID)) + } + l = len(m.UserID) + if l > 0 { + n += 1 + l + sovFrontend(uint64(l)) + } + if m.HttpRequest != nil { + l = m.HttpRequest.Size() + n += 1 + l + sovFrontend(uint64(l)) + } + return n +} + +func (m *SchedulerToFrontend) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + sovFrontend(uint64(m.Status)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovFrontend(uint64(l)) + } + return n +} + +func sovFrontend(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFrontend(x uint64) (n int) { + return sovFrontend(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *QuerierToScheduler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuerierToScheduler{`, + `QuerierID:` + fmt.Sprintf("%v", this.QuerierID) + `,`, + `}`, + }, "") + return s +} +func (this *SchedulerToQuerier) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SchedulerToQuerier{`, + `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, + `HttpRequest:` + strings.Replace(fmt.Sprintf("%v", this.HttpRequest), "HTTPRequest", "httpgrpc.HTTPRequest", 1) + `,`, + `FrontendAddress:` + fmt.Sprintf("%v", this.FrontendAddress) + `,`, + `UserID:` + fmt.Sprintf("%v", this.UserID) + `,`, + `}`, + }, "") + return s +} +func (this *QueryResultRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryResultRequest{`, + `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, + `HttpResponse:` + strings.Replace(fmt.Sprintf("%v", this.HttpResponse), "HTTPResponse", "httpgrpc.HTTPResponse", 1) + `,`, + `}`, + }, "") + return s +} +func (this *QueryResultResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryResultResponse{`, + `}`, + }, "") + return s +} +func (this *FrontendToScheduler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FrontendToScheduler{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `FrontendAddress:` + fmt.Sprintf("%v", this.FrontendAddress) + 
`,`, + `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, + `UserID:` + fmt.Sprintf("%v", this.UserID) + `,`, + `HttpRequest:` + strings.Replace(fmt.Sprintf("%v", this.HttpRequest), "HTTPRequest", "httpgrpc.HTTPRequest", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SchedulerToFrontend) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SchedulerToFrontend{`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func valueToStringFrontend(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *QuerierToScheduler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuerierToScheduler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuerierToScheduler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QuerierID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QuerierID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFrontend(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchedulerToQuerier) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchedulerToQuerier: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchedulerToQuerier: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryID", wireType) + } + m.QueryID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueryID |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HttpRequest == nil { + m.HttpRequest = &httpgrpc.HTTPRequest{} + } + if err := m.HttpRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FrontendAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFrontend(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResultRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResultRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResultRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryID", wireType) + } + m.QueryID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.QueryID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HttpResponse == nil { + m.HttpResponse = &httpgrpc.HTTPResponse{} + } + if err := m.HttpResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFrontend(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResultResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResultResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResultResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipFrontend(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FrontendToScheduler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FrontendToScheduler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FrontendToScheduler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= FrontendToSchedulerType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAddress", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FrontendAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryID", wireType) + } + m.QueryID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueryID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HttpRequest == nil { + m.HttpRequest = &httpgrpc.HTTPRequest{} + } + if err := m.HttpRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFrontend(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchedulerToFrontend) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchedulerToFrontend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchedulerToFrontend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", 
wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= SchedulerToFrontendStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFrontend(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFrontend(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFrontend + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFrontend + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFrontend + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFrontend + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthFrontend + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFrontend + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipFrontend(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthFrontend + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthFrontend = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFrontend = fmt.Errorf("proto: integer overflow") +) diff --git a/pkg/querier/frontend2/frontend.proto b/pkg/querier/frontend2/frontend.proto new file mode 100644 index 
00000000000..e420602ba25 --- /dev/null +++ b/pkg/querier/frontend2/frontend.proto @@ -0,0 +1,94 @@ +syntax = "proto3"; + +package frontend2; + +option go_package = "frontend2"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// Scheduler interface exposed to Queriers. (Previously called Frontend) +service SchedulerForQuerier { + // After calling this method, both Querier and Scheduler enters a loop, in which querier waits for + // a "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, + // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. + // + // Long-running loop is used to detect broken connection between scheduler and querier. This is important + // for scheduler to keep a list of connected queriers up-to-date. + rpc QuerierLoop(stream QuerierToScheduler) returns (stream SchedulerToQuerier) { }; +} + +// Querier reports its own clientID when it connects, so that scheduler knows how many *different* queriers are connected. +// To signal that querier is ready to accept another request, querier sends empty message. +message QuerierToScheduler { + string querierID = 1; +} + +message SchedulerToQuerier { + uint64 queryID = 1; + httpgrpc.HTTPRequest httpRequest = 2; + + // Where should querier send HTTP Response to (using FrontendForQuerier interface). + string frontendAddress = 3; + + // User who initiated the request. Needed to send reply back to frontend. + string userID = 4; +} + +// Frontend interface exposed to Queriers. Used by queriers to report back the result of the query. +service FrontendForQuerier { + rpc QueryResult (QueryResultRequest) returns (QueryResultResponse) { }; +} + +message QueryResultRequest { + uint64 queryID = 1; + httpgrpc.HTTPResponse httpResponse = 2; +} + +message QueryResultResponse { } + +// Scheduler interface exposed to Frontend. Frontend can enqueue and cancel requests. +service SchedulerForFrontend { + // After calling this method, both Frontend and Scheduler enter a loop. Frontend will keep sending ENQUEUE and + // CANCEL requests, and scheduler is expected to process them. Scheduler returns one response for each request. + // + // Long-running loop is used to detect broken connection between frontend and scheduler. This is important for both + // parties... if connection breaks, frontend can cancel (and possibly retry on different scheduler) all pending + // requests sent to this scheduler, while scheduler can cancel queued requests from given frontend. + rpc FrontendLoop(stream FrontendToScheduler) returns (stream SchedulerToFrontend) { }; +} + +enum FrontendToSchedulerType { + INIT = 0; + ENQUEUE = 1; + CANCEL = 2; +} + +message FrontendToScheduler { + FrontendToSchedulerType type = 1; + + // Used by INIT message. Will be put into all requests passed to querier. + string frontendAddress = 2; + + // Used by ENQUEUE and CANCEL. Each enqueued query must have queryID higher than previous one. + uint64 queryID = 3; + + // Following are used by ENQUEUE only. 
+ string userID = 4; + httpgrpc.HTTPRequest httpRequest = 5; +} + +enum SchedulerToFrontendStatus { + OK = 0; + TOO_MANY_REQUESTS_PER_TENANT = 1; + ERROR = 2; + SHUTTING_DOWN = 3; +} + +message SchedulerToFrontend { + SchedulerToFrontendStatus status = 1; + string error = 2; +} diff --git a/pkg/querier/frontend2/frontend2.go b/pkg/querier/frontend2/frontend2.go new file mode 100644 index 00000000000..b3bfcbb8d8f --- /dev/null +++ b/pkg/querier/frontend2/frontend2.go @@ -0,0 +1,290 @@ +package frontend2 + +import ( + "context" + "flag" + "fmt" + "math/rand" + "net/http" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/user" + "go.uber.org/atomic" + + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/cortexproject/cortex/pkg/util/services" +) + +// Config for a Frontend2. +type Config struct { + SchedulerAddr string `yaml:"scheduler_address"` + DNSLookupPeriod time.Duration `yaml:"scheduler_dns_lookup_period"` + WorkerConcurrency int `yaml:"scheduler_worker_concurrency"` + GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"grpc_client_config"` + + // Used to find local IP address, that is sent to scheduler and querier-worker. + InfNames []string `yaml:"interface_names"` + + // If set, address is not computed from interfaces. + Addr string `yaml:"address" doc:"hidden"` + Port int `doc:"hidden"` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.SchedulerAddr, "frontend.scheduler-address", "", "DNS hostname used for finding schedulers.") + f.DurationVar(&cfg.DNSLookupPeriod, "frontend.scheduler-dns-lookup-period", 10*time.Second, "How often to query DNS.") + f.IntVar(&cfg.WorkerConcurrency, "frontend.scheduler-worker-concurrency", 5, "Number of goroutines pushing requests to ") + + cfg.InfNames = []string{"eth0", "en0"} + f.Var((*flagext.StringSlice)(&cfg.InfNames), "frontend.interface", "Name of network interface to read address from.") + f.StringVar(&cfg.Addr, "frontend.address", "", "IP address to advertise to querier (via scheduler) (resolved via interfaces by default).") + f.IntVar(&cfg.Port, "frontend.port", 0, "Port to advertise to querier (via scheduler) (defaults to server.grpc-listen-port).") + + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("frontend.grpc-client-config", f) +} + +// Frontend2 implements GrpcRoundTripper. It queues HTTP requests, +// dispatches them to backends via gRPC, and handles retries for requests which failed. +type Frontend2 struct { + services.Service + + cfg Config + log log.Logger + + lastQueryID atomic.Uint64 + + // frontend workers will read from this channel, and send request to scheduler. + requestsCh chan *frontendRequest + + schedulerWorkers *frontendSchedulerWorkers + requests *requestsInProgress +} + +type frontendRequest struct { + queryID uint64 + request *httpgrpc.HTTPRequest + userID string + + cancel context.CancelFunc + + enqueue chan enqueueResult + response chan *httpgrpc.HTTPResponse +} + +type enqueueResult struct { + success bool // True if request was sent to scheduler successfully, and frontend should wait for response. + retry bool // Whether request can be retried. 
+ + cancelCh chan<- uint64 // Channel that can be used for request cancellation. If nil, cancellation is not possible. +} + +// New creates a new frontend. +func NewFrontend2(cfg Config, log log.Logger, reg prometheus.Registerer) (*Frontend2, error) { + requestsCh := make(chan *frontendRequest) + + schedulerWorkers, err := newFrontendSchedulerWorkers(cfg, fmt.Sprintf("%s:%d", cfg.Addr, cfg.Port), requestsCh, log) + if err != nil { + return nil, err + } + + f := &Frontend2{ + cfg: cfg, + log: log, + requestsCh: requestsCh, + schedulerWorkers: schedulerWorkers, + requests: newRequestsInProgress(), + } + // Randomize to avoid getting responses from queries sent before restart (which could lead to leak between tenants). + // This isn't perfect, but better than nothing. + f.lastQueryID.Store(rand.Uint64()) + + promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "cortex_frontend_queries_in_progress", + Help: "Number of queries in progress handled by this frontend.", + }, func() float64 { + return float64(f.requests.count()) + }) + + promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "cortex_frontend_connected_schedulers", + Help: "Number of schedulers this frontend is connected to.", + }, func() float64 { + return float64(f.schedulerWorkers.getWorkersCount()) + }) + + f.Service = services.NewIdleService(f.starting, f.stopping) + return f, nil +} + +func (f *Frontend2) starting(ctx context.Context) error { + return errors.Wrap(services.StartAndAwaitRunning(ctx, f.schedulerWorkers), "failed to start frontend scheduler workers") +} + +func (f *Frontend2) stopping(_ error) error { + return errors.Wrap(services.StopAndAwaitTerminated(context.Background(), f.schedulerWorkers), "failed to stop frontend scheduler workers") +} + +// RoundTripGRPC round trips a proto (instead of a HTTP request). +func (f *Frontend2) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { + if s := f.State(); s != services.Running { + return nil, fmt.Errorf("frontend not running: %v", s) + } + + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + + // Propagate trace context in gRPC too - this will be ignored if using HTTP. + tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx) + if tracer != nil && span != nil { + carrier := (*httpgrpcHeadersCarrier)(req) + if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil { + return nil, err + } + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + freq := &frontendRequest{ + queryID: f.lastQueryID.Inc(), + request: req, + userID: userID, + + cancel: cancel, + + // Buffer of 1 to ensure response or error can be written to the channel + // even if this goroutine goes away due to client context cancellation. + enqueue: make(chan enqueueResult, 1), + response: make(chan *httpgrpc.HTTPResponse, 1), + } + + f.requests.put(freq) + defer f.requests.delete(freq.queryID) + + retries := f.cfg.WorkerConcurrency + 1 // To make sure we hit at least two different schedulers. + +enqueueAgain: + select { + case <-ctx.Done(): + return nil, ctx.Err() + + case f.requestsCh <- freq: + // Enqueued, let's wait for response. 
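The hand-off between `RoundTripGRPC` and the scheduler workers relies on result channels with a buffer of one, so a worker (or a late `QueryResult` call) can always deliver its outcome without blocking, even when the caller has already returned because its context was cancelled. A minimal, self-contained sketch of that pattern, with hypothetical names that are not part of the patch:

```go
// Sketch of the buffered hand-off used between a caller and a worker:
// the result channel has capacity 1, so the worker's send never blocks,
// and the caller can stop waiting at any time without stranding the worker.
package main

import (
	"context"
	"fmt"
	"time"
)

type result struct {
	body string
	err  error
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// Buffer of 1: the worker can always write its result, even if the
	// caller has already given up and nobody ever reads from the channel.
	resCh := make(chan result, 1)

	go func() {
		time.Sleep(100 * time.Millisecond) // pretend to do the work
		resCh <- result{body: "done"}      // never blocks thanks to the buffer
	}()

	select {
	case <-ctx.Done():
		fmt.Println("caller gave up:", ctx.Err())
	case r := <-resCh:
		fmt.Println("got result:", r.body, r.err)
	}
}
```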
+ } + + var cancelCh chan<- uint64 + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case er := <-freq.enqueue: + if er.success { + cancelCh = er.cancelCh + break + } + + if er.retry { + retries-- + if retries > 0 { + goto enqueueAgain + } + } + + return nil, httpgrpc.Errorf(http.StatusInternalServerError, "failed to enqueue request") + } + + select { + case <-ctx.Done(): + if cancelCh != nil { + select { + case cancelCh <- freq.queryID: + // cancellation sent. + default: + // failed to cancel, ignore. + } + } + return nil, ctx.Err() + + case resp := <-freq.response: + return resp, nil + } +} + +func (f *Frontend2) QueryResult(_ context.Context, qrReq *QueryResultRequest) (*QueryResultResponse, error) { + req := f.requests.get(qrReq.QueryID) + if req != nil { + select { + case req.response <- qrReq.HttpResponse: + // Should always be possible, unless QueryResult is called multiple times with the same queryID. + default: + // If we cannot write to the channel, just ignore it. + } + } + + return &QueryResultResponse{}, nil +} + +// CheckReady determines if the query frontend is ready. Function parameters/return +// chosen to match the same method in the ingester +func (f *Frontend2) CheckReady(_ context.Context) error { + workers := f.schedulerWorkers.getWorkersCount() + + // If frontend is connected to at least one scheduler, we are ready. + if workers > 0 { + return nil + } + + msg := fmt.Sprintf("not ready: number of schedulers this worker is connected to is %d", workers) + level.Info(f.log).Log("msg", msg) + return errors.New(msg) +} + +type requestsInProgress struct { + mu sync.Mutex + requests map[uint64]*frontendRequest +} + +func newRequestsInProgress() *requestsInProgress { + return &requestsInProgress{ + requests: map[uint64]*frontendRequest{}, + } +} + +func (r *requestsInProgress) count() int { + r.mu.Lock() + defer r.mu.Unlock() + + return len(r.requests) +} + +func (r *requestsInProgress) put(req *frontendRequest) { + r.mu.Lock() + defer r.mu.Unlock() + + r.requests[req.queryID] = req +} + +func (r *requestsInProgress) delete(queryID uint64) { + r.mu.Lock() + defer r.mu.Unlock() + + delete(r.requests, queryID) +} + +func (r *requestsInProgress) get(queryID uint64) *frontendRequest { + r.mu.Lock() + defer r.mu.Unlock() + + return r.requests[queryID] +} diff --git a/pkg/querier/frontend2/frontend2_test.go b/pkg/querier/frontend2/frontend2_test.go new file mode 100644 index 00000000000..4c3bef7490a --- /dev/null +++ b/pkg/querier/frontend2/frontend2_test.go @@ -0,0 +1,258 @@ +package frontend2 + +import ( + "context" + "net" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/go-kit/kit/log" + "github.com/stretchr/testify/require" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/user" + "go.uber.org/atomic" + "google.golang.org/grpc" + + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/test" +) + +const testFrontendWorkerConcurrency = 5 + +func setupFrontend2(t *testing.T, schedulerReplyFunc func(f *Frontend2, msg *FrontendToScheduler) *SchedulerToFrontend) (*Frontend2, *mockScheduler) { + l, err := net.Listen("tcp", "") + require.NoError(t, err) + + server := grpc.NewServer() + + h, p, err := net.SplitHostPort(l.Addr().String()) + require.NoError(t, err) + + grpcPort, err := strconv.Atoi(p) + require.NoError(t, err) + + cfg := Config{} + flagext.DefaultValues(&cfg) + cfg.SchedulerAddr = 
l.Addr().String() + cfg.WorkerConcurrency = testFrontendWorkerConcurrency + cfg.Addr = h + cfg.Port = grpcPort + + //logger := log.NewLogfmtLogger(os.Stdout) + logger := log.NewNopLogger() + f, err := NewFrontend2(cfg, logger, nil) + require.NoError(t, err) + + RegisterFrontendForQuerierServer(server, f) + + ms := newMockScheduler(t, f, schedulerReplyFunc) + RegisterSchedulerForFrontendServer(server, ms) + + require.NoError(t, services.StartAndAwaitRunning(context.Background(), f)) + t.Cleanup(func() { + _ = services.StopAndAwaitTerminated(context.Background(), f) + }) + + go func() { + _ = server.Serve(l) + }() + + t.Cleanup(func() { + _ = l.Close() + }) + + // Wait for frontend to connect to scheduler. + test.Poll(t, 1*time.Second, 1, func() interface{} { + ms.mu.Lock() + defer ms.mu.Unlock() + + return len(ms.frontendAddr) + }) + + return f, ms +} + +func sendResponseWithDelay(f *Frontend2, delay time.Duration, queryID uint64, resp *httpgrpc.HTTPResponse) { + if delay > 0 { + time.Sleep(delay) + } + + _, _ = f.QueryResult(context.Background(), &QueryResultRequest{ + QueryID: queryID, + HttpResponse: resp, + }) +} + +func TestFrontendBasicWorkflow(t *testing.T) { + body := "all fine here" + + f, _ := setupFrontend2(t, func(f *Frontend2, msg *FrontendToScheduler) *SchedulerToFrontend { + // We cannot call QueryResult directly, as Frontend is not yet waiting for the response. + // It first needs to be told that enqueuing has succeeded. + go sendResponseWithDelay(f, 100*time.Millisecond, msg.QueryID, &httpgrpc.HTTPResponse{ + Code: 200, + Body: []byte(body), + }) + + return &SchedulerToFrontend{Status: OK} + }) + + resp, err := f.RoundTripGRPC(user.InjectOrgID(context.Background(), "test"), &httpgrpc.HTTPRequest{}) + require.NoError(t, err) + require.Equal(t, int32(200), resp.Code) + require.Equal(t, []byte(body), resp.Body) +} + +func TestFrontendRetryEnqueue(t *testing.T) { + // Frontend uses worker concurrency to compute number of retries. We use one less failure. + failures := atomic.NewInt64(testFrontendWorkerConcurrency - 1) + body := "hello world" + + f, _ := setupFrontend2(t, func(f *Frontend2, msg *FrontendToScheduler) *SchedulerToFrontend { + fail := failures.Dec() + if fail >= 0 { + return &SchedulerToFrontend{Status: SHUTTING_DOWN} + } + + go sendResponseWithDelay(f, 100*time.Millisecond, msg.QueryID, &httpgrpc.HTTPResponse{ + Code: 200, + Body: []byte(body), + }) + + return &SchedulerToFrontend{Status: OK} + }) + + _, err := f.RoundTripGRPC(user.InjectOrgID(context.Background(), "test"), &httpgrpc.HTTPRequest{}) + require.NoError(t, err) +} + +func TestFrontendEnqueueFailure(t *testing.T) { + f, _ := setupFrontend2(t, func(f *Frontend2, msg *FrontendToScheduler) *SchedulerToFrontend { + return &SchedulerToFrontend{Status: SHUTTING_DOWN} + }) + + _, err := f.RoundTripGRPC(user.InjectOrgID(context.Background(), "test"), &httpgrpc.HTTPRequest{}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "failed to enqueue request")) +} + +func TestFrontendCancellation(t *testing.T) { + f, ms := setupFrontend2(t, nil) + + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + + resp, err := f.RoundTripGRPC(user.InjectOrgID(ctx, "test"), &httpgrpc.HTTPRequest{}) + require.EqualError(t, err, context.DeadlineExceeded.Error()) + require.Nil(t, resp) + + // We wait a bit to make sure scheduler receives the cancellation request. 
+ test.Poll(t, time.Second, 2, func() interface{} { + ms.mu.Lock() + defer ms.mu.Unlock() + + return len(ms.msgs) + }) + + ms.checkWithLock(func() { + require.Equal(t, 2, len(ms.msgs)) + require.True(t, ms.msgs[0].Type == ENQUEUE) + require.True(t, ms.msgs[1].Type == CANCEL) + require.True(t, ms.msgs[0].QueryID == ms.msgs[1].QueryID) + }) +} + +func TestFrontendFailedCancellation(t *testing.T) { + f, ms := setupFrontend2(t, nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + time.Sleep(100 * time.Millisecond) + + // stop scheduler workers + addr := "" + f.schedulerWorkers.mu.Lock() + for k := range f.schedulerWorkers.workers { + addr = k + break + } + f.schedulerWorkers.mu.Unlock() + + f.schedulerWorkers.AddressRemoved(addr) + + // Wait for worker goroutines to stop. + time.Sleep(100 * time.Millisecond) + + // Cancel request. Frontend will try to send cancellation to scheduler, but that will fail (not visible to user). + // Everything else should still work fine. + cancel() + }() + + // send request + resp, err := f.RoundTripGRPC(user.InjectOrgID(ctx, "test"), &httpgrpc.HTTPRequest{}) + require.EqualError(t, err, context.Canceled.Error()) + require.Nil(t, resp) + + ms.checkWithLock(func() { + require.Equal(t, 1, len(ms.msgs)) + }) +} + +type mockScheduler struct { + t *testing.T + f *Frontend2 + + replyFunc func(f *Frontend2, msg *FrontendToScheduler) *SchedulerToFrontend + + mu sync.Mutex + frontendAddr map[string]int + msgs []*FrontendToScheduler +} + +func newMockScheduler(t *testing.T, f *Frontend2, replyFunc func(f *Frontend2, msg *FrontendToScheduler) *SchedulerToFrontend) *mockScheduler { + return &mockScheduler{t: t, f: f, frontendAddr: map[string]int{}, replyFunc: replyFunc} +} + +func (m *mockScheduler) checkWithLock(fn func()) { + m.mu.Lock() + defer m.mu.Unlock() + + fn() +} + +func (m *mockScheduler) FrontendLoop(frontend SchedulerForFrontend_FrontendLoopServer) error { + init, err := frontend.Recv() + if err != nil { + return err + } + + m.mu.Lock() + m.frontendAddr[init.FrontendAddress]++ + m.mu.Unlock() + + for { + msg, err := frontend.Recv() + if err != nil { + return err + } + + m.mu.Lock() + m.msgs = append(m.msgs, msg) + m.mu.Unlock() + + reply := &SchedulerToFrontend{Status: OK} + if m.replyFunc != nil { + reply = m.replyFunc(m.f, msg) + } + + if err := frontend.Send(reply); err != nil { + return err + } + } +} diff --git a/pkg/querier/frontend2/frontend_querier_queues.go b/pkg/querier/frontend2/frontend_querier_queues.go new file mode 100644 index 00000000000..fa3739f4f46 --- /dev/null +++ b/pkg/querier/frontend2/frontend_querier_queues.go @@ -0,0 +1,224 @@ +package frontend2 + +import ( + "math/rand" + "sort" + + "github.com/cortexproject/cortex/pkg/util" +) + +// This struct holds user queues for pending requests. It also keeps track of connected queriers, +// and mapping between users and queriers. +type queues struct { + userQueues map[string]*userQueue + + // List of all users with queues, used for iteration when searching for next queue to handle. + // Users removed from the middle are replaced with "". To avoid skipping users during iteration, we only shrink + // this list when there are ""'s at the end of it. + users []string + + maxUserQueueSize int + + // Number of connections per querier. + querierConnections map[string]int + // Sorted list of querier names, used when creating per-user shard. 
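The `users` slice described above deliberately keeps empty-string holes where tenants were deleted, so the `lastUserIndex` a querier passes back stays meaningful and no tenant is skipped while iterating. A stripped-down, hypothetical illustration of that round-robin (simplified, not the scheduler's actual code):

```go
// Hypothetical sketch: round-robin over a tenant list that may contain
// "" holes left by deleted tenants. The caller passes back the last index
// it received, so iteration resumes fairly where it left off.
package main

import "fmt"

// nextTenant returns the next non-empty tenant after lastIndex, or "" if none.
func nextTenant(tenants []string, lastIndex int) (string, int) {
	idx := lastIndex
	for i := 0; i < len(tenants); i++ {
		idx++
		if idx >= len(tenants) {
			idx = 0 // wrap explicitly instead of using modulo, so a shrunken list is handled
		}
		if tenants[idx] != "" {
			return tenants[idx], idx
		}
	}
	return "", idx
}

func main() {
	tenants := []string{"tenant-a", "", "tenant-c"} // "" is a hole left by a deleted tenant

	last := -1
	for i := 0; i < 4; i++ {
		var t string
		t, last = nextTenant(tenants, last)
		fmt.Println(t) // tenant-a, tenant-c, tenant-a, tenant-c
	}
}
```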
+ sortedQueriers []string +} + +type userQueue struct { + ch chan *schedulerRequest + + // If not nil, only these queriers can handle user requests. If nil, all queriers can. + // We set this to nil if number of available queriers <= maxQueriers. + queriers map[string]struct{} + maxQueriers int + + // Seed for shuffle sharding of queriers. This seed is based on userID only and is therefore consistent + // between different frontends. + seed int64 + + // Points back to 'users' field in queues. Enables quick cleanup. + index int +} + +func newUserQueues(maxUserQueueSize int) *queues { + return &queues{ + userQueues: map[string]*userQueue{}, + users: nil, + maxUserQueueSize: maxUserQueueSize, + querierConnections: map[string]int{}, + sortedQueriers: nil, + } +} + +func (q *queues) len() int { + return len(q.userQueues) +} + +func (q *queues) deleteQueue(userID string) { + uq := q.userQueues[userID] + if uq == nil { + return + } + + delete(q.userQueues, userID) + q.users[uq.index] = "" + + // Shrink users list size if possible. This is safe, and no users will be skipped during iteration. + for ix := len(q.users) - 1; ix >= 0 && q.users[ix] == ""; ix-- { + q.users = q.users[:ix] + } +} + +// Returns existing or new queue for user. +// MaxQueriers is used to compute which queriers should handle requests for this user. +// If maxQueriers is <= 0, all queriers can handle this user's requests. +// If maxQueriers has changed since the last call, queriers for this are recomputed. +func (q *queues) getOrAddQueue(userID string, maxQueriers int) chan *schedulerRequest { + // Empty user is not allowed, as that would break our users list ("" is used for free spot). + if userID == "" { + return nil + } + + if maxQueriers < 0 { + maxQueriers = 0 + } + + uq := q.userQueues[userID] + + if uq == nil { + uq = &userQueue{ + ch: make(chan *schedulerRequest, q.maxUserQueueSize), + seed: util.ShuffleShardSeed(userID, ""), + index: -1, + } + q.userQueues[userID] = uq + + // Add user to the list of users... find first free spot, and put it there. + for ix, u := range q.users { + if u == "" { + uq.index = ix + q.users[ix] = userID + break + } + } + + // ... or add to the end. + if uq.index < 0 { + uq.index = len(q.users) + q.users = append(q.users, userID) + } + } + + if uq.maxQueriers != maxQueriers { + uq.maxQueriers = maxQueriers + uq.queriers = shuffleQueriersForUser(uq.seed, maxQueriers, q.sortedQueriers, nil) + } + + return uq.ch +} + +// Finds next queue for the querier. To support fair scheduling between users, client is expected +// to pass last user index returned by this function as argument. Is there was no previous +// last user index, use -1. +func (q *queues) getNextQueueForQuerier(lastUserIndex int, querier string) (chan *schedulerRequest, string, int) { + uid := lastUserIndex + + for iters := 0; iters < len(q.users); iters++ { + uid = uid + 1 + + // Don't use "mod len(q.users)", as that could skip users at the beginning of the list + // for example when q.users has shrunk since last call. + if uid >= len(q.users) { + uid = 0 + } + + u := q.users[uid] + if u == "" { + continue + } + + q := q.userQueues[u] + + if q.queriers != nil { + if _, ok := q.queriers[querier]; !ok { + // This querier is not handling the user. + continue + } + } + + return q.ch, u, uid + } + return nil, "", uid +} + +func (q *queues) addQuerierConnection(querier string) { + conns := q.querierConnections[querier] + + q.querierConnections[querier] = conns + 1 + + // First connection from this querier. 
+ if conns == 0 { + q.sortedQueriers = append(q.sortedQueriers, querier) + sort.Strings(q.sortedQueriers) + + q.recomputeUserQueriers() + } +} + +func (q *queues) removeQuerierConnection(querier string) { + conns := q.querierConnections[querier] + if conns <= 0 { + panic("unexpected number of connections for querier") + } + + conns-- + if conns > 0 { + q.querierConnections[querier] = conns + } else { + delete(q.querierConnections, querier) + + ix := sort.SearchStrings(q.sortedQueriers, querier) + if ix >= len(q.sortedQueriers) || q.sortedQueriers[ix] != querier { + panic("incorrect state of sorted queriers") + } + + q.sortedQueriers = append(q.sortedQueriers[:ix], q.sortedQueriers[ix+1:]...) + + q.recomputeUserQueriers() + } +} + +func (q *queues) recomputeUserQueriers() { + scratchpad := make([]string, 0, len(q.sortedQueriers)) + + for _, uq := range q.userQueues { + uq.queriers = shuffleQueriersForUser(uq.seed, uq.maxQueriers, q.sortedQueriers, scratchpad) + } +} + +// Scratchpad is used for shuffling, to avoid new allocations. If nil, new slice is allocated. +// shuffleQueriersForUser returns nil if queriersToSelect is 0 or there are not enough queriers to select from. +// In that case *all* queriers should be used. +func shuffleQueriersForUser(userSeed int64, queriersToSelect int, allSortedQueriers []string, scratchpad []string) map[string]struct{} { + if queriersToSelect == 0 || len(allSortedQueriers) <= queriersToSelect { + return nil + } + + result := make(map[string]struct{}, queriersToSelect) + rnd := rand.New(rand.NewSource(userSeed)) + + scratchpad = scratchpad[:0] + scratchpad = append(scratchpad, allSortedQueriers...) + + last := len(scratchpad) - 1 + for i := 0; i < queriersToSelect; i++ { + r := rnd.Intn(last + 1) + result[scratchpad[r]] = struct{}{} + // move selected item to the end, it won't be selected anymore. 
+ scratchpad[r], scratchpad[last] = scratchpad[last], scratchpad[r] + last-- + } + + return result +} diff --git a/pkg/querier/frontend2/frontend_querier_queues_test.go b/pkg/querier/frontend2/frontend_querier_queues_test.go new file mode 100644 index 00000000000..1337cea8e3d --- /dev/null +++ b/pkg/querier/frontend2/frontend_querier_queues_test.go @@ -0,0 +1,291 @@ +package frontend2 + +import ( + "fmt" + "math" + "math/rand" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestQueues(t *testing.T) { + uq := newUserQueues(0) + assert.NotNil(t, uq) + assert.NoError(t, isConsistent(uq)) + + q, u, lastUserIndex := uq.getNextQueueForQuerier(-1, "querier-1") + assert.Nil(t, q) + assert.Equal(t, "", u) + + // Add queues: [one] + qOne := getOrAdd(t, uq, "one", 0) + lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qOne, qOne) + + // [one two] + qTwo := getOrAdd(t, uq, "two", 0) + assert.NotEqual(t, qOne, qTwo) + + lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qTwo, qOne, qTwo, qOne) + confirmOrderForQuerier(t, uq, "querier-2", -1, qOne, qTwo, qOne) + + // [one two three] + // confirm fifo by adding a third queue and iterating to it + qThree := getOrAdd(t, uq, "three", 0) + + lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qTwo, qThree, qOne) + + // Remove one: ["" two three] + uq.deleteQueue("one") + assert.NoError(t, isConsistent(uq)) + + lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qTwo, qThree, qTwo) + + // "four" is added at the beginning of the list: [four two three] + qFour := getOrAdd(t, uq, "four", 0) + + lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qThree, qFour, qTwo, qThree) + + // Remove two: [four "" three] + uq.deleteQueue("two") + assert.NoError(t, isConsistent(uq)) + + lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qFour, qThree, qFour) + + // Remove three: [four] + uq.deleteQueue("three") + assert.NoError(t, isConsistent(uq)) + + // Remove four: [] + uq.deleteQueue("four") + assert.NoError(t, isConsistent(uq)) + + q, _, _ = uq.getNextQueueForQuerier(lastUserIndex, "querier-1") + assert.Nil(t, q) +} + +func TestQueuesWithQueriers(t *testing.T) { + uq := newUserQueues(0) + assert.NotNil(t, uq) + assert.NoError(t, isConsistent(uq)) + + queriers := 30 + users := 1000 + maxQueriersPerUser := 5 + + // Add some queriers. + for ix := 0; ix < queriers; ix++ { + qid := fmt.Sprintf("querier-%d", ix) + uq.addQuerierConnection(qid) + + // No querier has any queues yet. + q, u, _ := uq.getNextQueueForQuerier(-1, qid) + assert.Nil(t, q) + assert.Equal(t, "", u) + } + + assert.NoError(t, isConsistent(uq)) + + // Add user queues. + for u := 0; u < users; u++ { + uid := fmt.Sprintf("user-%d", u) + getOrAdd(t, uq, uid, maxQueriersPerUser) + + // Verify it has maxQueriersPerUser queriers assigned now. + qs := uq.userQueues[uid].queriers + assert.Equal(t, maxQueriersPerUser, len(qs)) + } + + // After adding all users, verify results. For each querier, find out how many different users it handles, + // and compute mean and stdDev. 
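`shuffleQueriersForUser` above is a partial Fisher-Yates selection seeded per tenant, which is what the assertions in this test rely on: every frontend or scheduler computes the same querier subset for the same tenant. A standalone sketch of the technique, using a hypothetical FNV-based seed in place of the real `util.ShuffleShardSeed` helper:

```go
// Hypothetical sketch of per-tenant shuffle sharding: pick k queriers out of a
// sorted list with a partial Fisher-Yates shuffle seeded from the tenant, so
// the same tenant always gets the same subset regardless of who computes it.
package main

import (
	"fmt"
	"hash/fnv"
	"math/rand"
)

func tenantSeed(tenant string) int64 {
	h := fnv.New64a()
	_, _ = h.Write([]byte(tenant))
	return int64(h.Sum64())
}

// pickQueriers returns k distinct queriers chosen deterministically for the tenant.
func pickQueriers(tenant string, k int, sortedQueriers []string) map[string]struct{} {
	if k <= 0 || len(sortedQueriers) <= k {
		return nil // nil means "use all queriers"
	}

	rnd := rand.New(rand.NewSource(tenantSeed(tenant)))
	pool := append([]string(nil), sortedQueriers...) // copy, keep the input intact

	picked := make(map[string]struct{}, k)
	last := len(pool) - 1
	for i := 0; i < k; i++ {
		r := rnd.Intn(last + 1)
		picked[pool[r]] = struct{}{}
		// Move the selected querier out of the candidate range so it cannot be picked again.
		pool[r], pool[last] = pool[last], pool[r]
		last--
	}
	return picked
}

func main() {
	queriers := []string{"querier-1", "querier-2", "querier-3", "querier-4", "querier-5"}
	fmt.Println(pickQueriers("tenant-a", 2, queriers))
	fmt.Println(pickQueriers("tenant-a", 2, queriers)) // same tenant, same subset
}
```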
+ queriersMap := make(map[string]int) + + for q := 0; q < queriers; q++ { + qid := fmt.Sprintf("querier-%d", q) + + lastUserIndex := -1 + for { + _, _, newIx := uq.getNextQueueForQuerier(lastUserIndex, qid) + if newIx < lastUserIndex { + break + } + lastUserIndex = newIx + queriersMap[qid]++ + } + } + + mean := float64(0) + for _, c := range queriersMap { + mean += float64(c) + } + mean = mean / float64(len(queriersMap)) + + stdDev := float64(0) + for _, c := range queriersMap { + d := float64(c) - mean + stdDev += (d * d) + } + stdDev = math.Sqrt(stdDev / float64(len(queriersMap))) + t.Log("mean:", mean, "stddev:", stdDev) + + assert.InDelta(t, users*maxQueriersPerUser/queriers, mean, 1) + assert.InDelta(t, stdDev, 0, mean*0.2) +} + +func TestQueuesConsistency(t *testing.T) { + uq := newUserQueues(0) + assert.NotNil(t, uq) + assert.NoError(t, isConsistent(uq)) + + r := rand.New(rand.NewSource(time.Now().Unix())) + + lastUserIndexes := map[string]int{} + + conns := map[string]int{} + + for i := 0; i < 1000; i++ { + switch r.Int() % 6 { + case 0: + assert.NotNil(t, uq.getOrAddQueue(generateTenant(r), 3)) + case 1: + qid := generateQuerier(r) + _, _, luid := uq.getNextQueueForQuerier(lastUserIndexes[qid], qid) + lastUserIndexes[qid] = luid + case 2: + uq.deleteQueue(generateTenant(r)) + case 3: + q := generateQuerier(r) + uq.addQuerierConnection(q) + conns[q]++ + case 4: + q := generateQuerier(r) + if conns[q] > 0 { + uq.removeQuerierConnection(q) + conns[q]-- + } + } + + assert.NoErrorf(t, isConsistent(uq), "last action %d", i) + } +} + +func generateTenant(r *rand.Rand) string { + return fmt.Sprint("tenant-", r.Int()%5) +} + +func generateQuerier(r *rand.Rand) string { + return fmt.Sprint("querier-", r.Int()%5) +} + +func getOrAdd(t *testing.T, uq *queues, tenant string, maxQueriers int) chan *schedulerRequest { + q := uq.getOrAddQueue(tenant, maxQueriers) + assert.NotNil(t, q) + assert.NoError(t, isConsistent(uq)) + assert.Equal(t, q, uq.getOrAddQueue(tenant, maxQueriers)) + return q +} + +func confirmOrderForQuerier(t *testing.T, uq *queues, querier string, lastUserIndex int, qs ...chan *schedulerRequest) int { + var n chan *schedulerRequest + for _, q := range qs { + n, _, lastUserIndex = uq.getNextQueueForQuerier(lastUserIndex, querier) + assert.Equal(t, q, n) + assert.NoError(t, isConsistent(uq)) + } + return lastUserIndex +} + +func isConsistent(uq *queues) error { + if len(uq.sortedQueriers) != len(uq.querierConnections) { + return fmt.Errorf("inconsistent number of sorted queriers and querier connections") + } + + uc := 0 + for ix, u := range uq.users { + q := uq.userQueues[u] + if u != "" && q == nil { + return fmt.Errorf("user %s doesn't have queue", u) + } + if u == "" && q != nil { + return fmt.Errorf("user %s shouldn't have queue", u) + } + if u == "" { + continue + } + + uc++ + + if q.index != ix { + return fmt.Errorf("invalid user's index, expected=%d, got=%d", ix, q.index) + } + + if q.maxQueriers == 0 && q.queriers != nil { + return fmt.Errorf("user %s has queriers, but maxQueriers=0", u) + } + + if q.maxQueriers > 0 && len(uq.sortedQueriers) <= q.maxQueriers && q.queriers != nil { + return fmt.Errorf("user %s has queriers set despite not enough queriers available", u) + } + + if q.maxQueriers > 0 && len(uq.sortedQueriers) > q.maxQueriers && len(q.queriers) != q.maxQueriers { + return fmt.Errorf("user %s has incorrect number of queriers, expected=%d, got=%d", u, len(q.queriers), q.maxQueriers) + } + } + + if uc != len(uq.userQueues) { + return fmt.Errorf("inconsistent 
number of users list and user queues") + } + + return nil +} + +func TestShuffleQueriers(t *testing.T) { + allQueriers := []string{"a", "b", "c", "d", "e"} + + require.Nil(t, shuffleQueriersForUser(12345, 10, allQueriers, nil)) + require.Nil(t, shuffleQueriersForUser(12345, len(allQueriers), allQueriers, nil)) + + r1 := shuffleQueriersForUser(12345, 3, allQueriers, nil) + require.Equal(t, 3, len(r1)) + + // Same input produces same output. + r2 := shuffleQueriersForUser(12345, 3, allQueriers, nil) + require.Equal(t, 3, len(r2)) + require.Equal(t, r1, r2) +} + +func TestShuffleQueriersCorrectness(t *testing.T) { + const queriersCount = 100 + + var allSortedQueriers []string + for i := 0; i < queriersCount; i++ { + allSortedQueriers = append(allSortedQueriers, fmt.Sprintf("%d", i)) + } + sort.Strings(allSortedQueriers) + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + const tests = 1000 + for i := 0; i < tests; i++ { + toSelect := r.Intn(queriersCount) + if toSelect == 0 { + toSelect = 3 + } + + selected := shuffleQueriersForUser(r.Int63(), toSelect, allSortedQueriers, nil) + + require.Equal(t, toSelect, len(selected)) + + sort.Strings(allSortedQueriers) + prevQuerier := "" + for _, q := range allSortedQueriers { + require.True(t, prevQuerier < q, "non-unique querier") + prevQuerier = q + + ix := sort.SearchStrings(allSortedQueriers, q) + require.True(t, ix < len(allSortedQueriers) && allSortedQueriers[ix] == q, "selected querier is not between all queriers") + } + } +} diff --git a/pkg/querier/frontend2/frontend_scheduler_worker.go b/pkg/querier/frontend2/frontend_scheduler_worker.go new file mode 100644 index 00000000000..cb0095e72ca --- /dev/null +++ b/pkg/querier/frontend2/frontend_scheduler_worker.go @@ -0,0 +1,303 @@ +package frontend2 + +import ( + "context" + "errors" + "net/http" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/weaveworks/common/httpgrpc" + "google.golang.org/grpc" + + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/services" +) + +type frontendSchedulerWorkers struct { + services.Service + + cfg Config + log log.Logger + frontendAddress string + + // Channel with requests that should be forwarded to the scheduler. + requestsCh <-chan *frontendRequest + + watcher services.Service + + mu sync.Mutex + // Set to nil when stop is called... no more workers are created afterwards. 
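Scheduler discovery drives this worker map: each resolved address gets exactly one worker, and setting the map to nil on shutdown means a late notification cannot create new workers. A simplified, hypothetical sketch of that bookkeeping, with the real DNS watcher and gRPC dialing left out:

```go
// Hypothetical sketch: one worker per discovered scheduler address.
// A nil map after Stop() means late AddressAdded notifications are ignored.
package main

import (
	"fmt"
	"sync"
)

type worker struct{ addr string }

func (w *worker) stop() { fmt.Println("stopping worker for", w.addr) }

type workerSet struct {
	mu      sync.Mutex
	workers map[string]*worker // nil once Stop() has been called
}

func newWorkerSet() *workerSet {
	return &workerSet{workers: map[string]*worker{}}
}

func (s *workerSet) AddressAdded(addr string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.workers == nil || s.workers[addr] != nil {
		return // already stopped, or a worker already exists for this address
	}
	s.workers[addr] = &worker{addr: addr}
	fmt.Println("started worker for", addr)
}

func (s *workerSet) AddressRemoved(addr string) {
	s.mu.Lock()
	w := s.workers[addr] // safe even if s.workers is nil
	delete(s.workers, addr)
	s.mu.Unlock()
	if w != nil {
		w.stop()
	}
}

func (s *workerSet) Stop() {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, w := range s.workers {
		w.stop()
	}
	s.workers = nil
}

func main() {
	s := newWorkerSet()
	s.AddressAdded("scheduler-1:9009")
	s.AddressRemoved("scheduler-1:9009")
	s.Stop()
	s.AddressAdded("scheduler-2:9009") // ignored: the set is already stopped
}
```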
+ workers map[string]*frontendSchedulerWorker +} + +func newFrontendSchedulerWorkers(cfg Config, frontendAddress string, requestsCh <-chan *frontendRequest, log log.Logger) (*frontendSchedulerWorkers, error) { + f := &frontendSchedulerWorkers{ + cfg: cfg, + log: log, + frontendAddress: frontendAddress, + requestsCh: requestsCh, + workers: map[string]*frontendSchedulerWorker{}, + } + + w, err := NewDNSWatcher(cfg.SchedulerAddr, cfg.DNSLookupPeriod, f) + if err != nil { + return nil, err + } + + f.watcher = w + f.Service = services.NewIdleService(f.starting, f.stopping) + return f, nil +} + +func (f *frontendSchedulerWorkers) starting(ctx context.Context) error { + return services.StartAndAwaitRunning(ctx, f.watcher) +} + +func (f *frontendSchedulerWorkers) stopping(_ error) error { + err := services.StopAndAwaitTerminated(context.Background(), f.watcher) + + f.mu.Lock() + defer f.mu.Unlock() + + for _, w := range f.workers { + w.stop() + } + f.workers = nil + + return err +} + +func (f *frontendSchedulerWorkers) AddressAdded(address string) { + f.mu.Lock() + ws := f.workers + w := f.workers[address] + f.mu.Unlock() + + // Already stopped or we already have worker for this address. + if ws == nil || w != nil { + return + } + + level.Debug(f.log).Log("msg", "adding connection to scheduler", "addr", address) + conn, err := f.connectToScheduler(context.Background(), address) + if err != nil { + level.Error(f.log).Log("msg", "error connecting to scheduler", "addr", address, "err", err) + return + } + + // If not, start a new one. + w = newFrontendSchedulerWorker(conn, address, f.frontendAddress, f.requestsCh, f.cfg.WorkerConcurrency, f.log) + + f.mu.Lock() + defer f.mu.Unlock() + + // Can be nil if stopping has been called already. + if f.workers != nil { + f.workers[address] = w + go w.start() + } +} + +func (f *frontendSchedulerWorkers) AddressRemoved(address string) { + level.Debug(f.log).Log("msg", "removing connection to scheduler", "addr", address) + + f.mu.Lock() + // This works fine if f.workers is nil already. + w := f.workers[address] + delete(f.workers, address) + f.mu.Unlock() + + if w != nil { + w.stop() + } +} + +// Get number of workers. +func (f *frontendSchedulerWorkers) getWorkersCount() int { + f.mu.Lock() + defer f.mu.Unlock() + + return len(f.workers) +} + +func (f *frontendSchedulerWorkers) connectToScheduler(ctx context.Context, address string) (*grpc.ClientConn, error) { + // Because we only use single long-running method, it doesn't make sense to inect user ID, send over tracing or add metrics. + opts, err := f.cfg.GRPCClientConfig.DialOption(nil, nil) + if err != nil { + return nil, err + } + + conn, err := grpc.DialContext(ctx, address, opts...) + if err != nil { + return nil, err + } + return conn, nil +} + +// Worker managing single gRPC connection to Scheduler. Each worker starts multiple goroutines for forwarding +// requests and cancellations to scheduler. +type frontendSchedulerWorker struct { + log log.Logger + + conn *grpc.ClientConn + concurrency int + schedulerAddr string + frontendAddr string + + // Context and cancellation used by individual goroutines. + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + // Shared between all frontend workers. + requestCh <-chan *frontendRequest + + // Cancellation requests for this scheduler are received via this channel. It is passed to frontend after + // query has been enqueued to scheduler. 
+ cancelCh chan uint64 +} + +func newFrontendSchedulerWorker(conn *grpc.ClientConn, schedulerAddr string, frontendAddr string, requestCh <-chan *frontendRequest, concurrency int, log log.Logger) *frontendSchedulerWorker { + w := &frontendSchedulerWorker{ + log: log, + conn: conn, + concurrency: concurrency, + schedulerAddr: schedulerAddr, + frontendAddr: frontendAddr, + requestCh: requestCh, + cancelCh: make(chan uint64), + } + w.ctx, w.cancel = context.WithCancel(context.Background()) + + return w +} + +func (w *frontendSchedulerWorker) start() { + client := NewSchedulerForFrontendClient(w.conn) + + for i := 0; i < w.concurrency; i++ { + w.wg.Add(1) + go func() { + defer w.wg.Done() + w.runOne(w.ctx, client) + }() + } +} + +func (w *frontendSchedulerWorker) stop() { + w.cancel() + w.wg.Wait() + if err := w.conn.Close(); err != nil { + level.Error(w.log).Log("msg", "error while closing connection to scheduler", "err", err) + } +} + +func (w *frontendSchedulerWorker) runOne(ctx context.Context, client SchedulerForFrontendClient) { + backoffConfig := util.BackoffConfig{ + MinBackoff: 50 * time.Millisecond, + MaxBackoff: 1 * time.Second, + } + + backoff := util.NewBackoff(ctx, backoffConfig) + for backoff.Ongoing() { + loop, loopErr := client.FrontendLoop(ctx) + if loopErr != nil { + level.Error(w.log).Log("msg", "error contacting scheduler", "err", loopErr, "addr", w.schedulerAddr) + backoff.Wait() + continue + } + + loopErr = w.schedulerLoop(ctx, loop) + if closeErr := loop.CloseSend(); closeErr != nil { + level.Debug(w.log).Log("msg", "failed to close frontend loop", "err", loopErr, "addr", w.schedulerAddr) + } + + if loopErr != nil { + level.Error(w.log).Log("msg", "error sending requests to scheduler", "err", loopErr, "addr", w.schedulerAddr) + backoff.Wait() + continue + } + + backoff.Reset() + } +} + +func (w *frontendSchedulerWorker) schedulerLoop(ctx context.Context, loop SchedulerForFrontend_FrontendLoopClient) error { + if err := loop.Send(&FrontendToScheduler{ + Type: INIT, + FrontendAddress: w.frontendAddr, + }); err != nil { + return err + } + + for { + select { + case <-ctx.Done(): + return nil + + case req := <-w.requestCh: + err := loop.Send(&FrontendToScheduler{ + Type: ENQUEUE, + QueryID: req.queryID, + UserID: req.userID, + HttpRequest: req.request, + FrontendAddress: w.frontendAddr, + }) + + if err != nil { + req.enqueue <- enqueueResult{success: false, retry: true} + return err + } + + resp, err := loop.Recv() + if err != nil { + req.enqueue <- enqueueResult{success: false, retry: true} + return err + } + + switch resp.Status { + case OK: + req.enqueue <- enqueueResult{success: true, cancelCh: w.cancelCh} + // Response will come from querier. + + case SHUTTING_DOWN: + // Scheduler is shutting down, report failure to enqueue and stop the loop. + req.enqueue <- enqueueResult{success: false, retry: true} + return errors.New("scheduler is shutting down") + + case ERROR: + req.enqueue <- enqueueResult{success: true, retry: false} + req.response <- &httpgrpc.HTTPResponse{ + Code: http.StatusInternalServerError, + Body: []byte(err.Error()), + } + + case TOO_MANY_REQUESTS_PER_TENANT: + req.enqueue <- enqueueResult{success: true, retry: false} + req.response <- &httpgrpc.HTTPResponse{ + Code: http.StatusTooManyRequests, + Body: []byte("too many outstanding requests"), + } + } + + case reqID := <-w.cancelCh: + err := loop.Send(&FrontendToScheduler{ + Type: CANCEL, + QueryID: reqID, + }) + + if err != nil { + return err + } + + // Not interested in cancellation response. 
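Cancellation here is deliberately best-effort: the frontend performs a non-blocking send of the query ID on the worker's cancel channel and simply moves on if no worker is ready, because the scheduler will drop the query anyway once the frontend disconnects. A tiny sketch of that non-blocking send, with hypothetical names:

```go
// Hypothetical sketch: best-effort cancellation via a non-blocking channel send.
// If no worker is currently ready to forward the CANCEL, the caller just moves on.
package main

import (
	"fmt"
	"time"
)

func requestCancellation(cancelCh chan<- uint64, queryID uint64) {
	select {
	case cancelCh <- queryID:
		fmt.Println("cancellation sent for query", queryID)
	default:
		fmt.Println("no worker ready, cancellation dropped for query", queryID)
	}
}

func main() {
	cancelCh := make(chan uint64) // unbuffered: the send succeeds only if a worker is already receiving

	go func() {
		id := <-cancelCh
		fmt.Println("worker would forward CANCEL for query", id)
	}()
	time.Sleep(10 * time.Millisecond) // give the worker goroutine time to block on receive

	requestCancellation(cancelCh, 42) // likely delivered
	requestCancellation(cancelCh, 43) // likely dropped: the only receiver already took a value

	time.Sleep(10 * time.Millisecond) // let the worker goroutine print before exiting
}
```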
+ _, err = loop.Recv() + if err != nil { + return err + } + } + } +} diff --git a/pkg/querier/frontend2/querier_scheduler_worker.go b/pkg/querier/frontend2/querier_scheduler_worker.go new file mode 100644 index 00000000000..9f5ec53c54c --- /dev/null +++ b/pkg/querier/frontend2/querier_scheduler_worker.go @@ -0,0 +1,434 @@ +package frontend2 + +import ( + "context" + "flag" + "fmt" + "net/http" + "os" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + otgrpc "github.com/opentracing-contrib/go-grpc" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/middleware" + "github.com/weaveworks/common/user" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/cortexproject/cortex/pkg/ring/client" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/grpcclient" + cortex_middleware "github.com/cortexproject/cortex/pkg/util/middleware" + "github.com/cortexproject/cortex/pkg/util/services" +) + +// Handler for HTTP requests wrapped in protobuf messages. +type RequestHandler interface { + Handle(context.Context, *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) +} + +type QuerierWorkersConfig struct { + SchedulerAddr string `yaml:"scheduler_address"` + DNSLookupPeriod time.Duration `yaml:"scheduler_dns_lookup_period"` + + // Following settings are not exposed via YAML or CLI Flags, but instead copied from "v1" worker config. + GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"-"` // In v1 this is called "frontend client", here we use it for scheduler. + MatchMaxConcurrency bool `yaml:"-"` + MaxConcurrentRequests int `yaml:"-"` // Must be same as passed to PromQL Engine. + Parallelism int `yaml:"-"` + QuerierID string `yaml:"-"` // ID to pass to scheduler when connecting. +} + +func (cfg *QuerierWorkersConfig) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.SchedulerAddr, "querier.scheduler-address", "", "Hostname (and port) of scheduler that querier will periodically resolve, connect to and receive queries from. If set, takes precedence over -querier.frontend-address.") + f.DurationVar(&cfg.DNSLookupPeriod, "querier.scheduler-dns-lookup-period", 10*time.Second, "How often to resolve scheduler hostname.") +} + +type querierSchedulerWorkers struct { + *services.BasicService + + cfg QuerierWorkersConfig + requestHandler RequestHandler + + log log.Logger + + subservices *services.Manager + + frontendPool *client.Pool + frontendClientRequestDuration *prometheus.HistogramVec + + mu sync.Mutex + // Set to nil when stop is called... no more workers are created afterwards. 
+ workers map[string]*querierSchedulerWorker +} + +func NewQuerierSchedulerWorkers(cfg QuerierWorkersConfig, handler RequestHandler, reg prometheus.Registerer, log log.Logger) (services.Service, error) { + if cfg.SchedulerAddr == "" { + return nil, errors.New("no scheduler address") + } + + if cfg.QuerierID == "" { + hostname, err := os.Hostname() + if err != nil { + return nil, errors.Wrap(err, "failed to get hostname for configuring querier ID") + } + cfg.QuerierID = hostname + } + + frontendClientsGauge := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_querier_scheduler_worker_clients", + Help: "The current number of frontend clients.", + }) + + f := &querierSchedulerWorkers{ + cfg: cfg, + log: log, + requestHandler: handler, + workers: map[string]*querierSchedulerWorker{}, + + frontendClientRequestDuration: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ + Name: "cortex_querier_scheduler_worker_frontend_request_duration_seconds", + Help: "Time spend doing requests to frontend.", + Buckets: prometheus.ExponentialBuckets(0.001, 4, 6), + }, []string{"operation", "status_code"}), + } + + poolConfig := client.PoolConfig{ + CheckInterval: 5 * time.Second, + HealthCheckEnabled: true, + HealthCheckTimeout: 1 * time.Second, + } + + p := client.NewPool("frontend", poolConfig, nil, f.createFrontendClient, frontendClientsGauge, log) + f.frontendPool = p + + w, err := NewDNSWatcher(cfg.SchedulerAddr, cfg.DNSLookupPeriod, f) + if err != nil { + return nil, err + } + + f.subservices, err = services.NewManager(w, p) + if err != nil { + return nil, errors.Wrap(err, "querier scheduler worker subservices") + } + f.BasicService = services.NewIdleService(f.starting, f.stopping) + return f, nil +} + +func (f *querierSchedulerWorkers) starting(ctx context.Context) error { + return services.StartManagerAndAwaitHealthy(ctx, f.subservices) +} + +func (f *querierSchedulerWorkers) stopping(_ error) error { + return services.StopManagerAndAwaitStopped(context.Background(), f.subservices) +} + +func (f *querierSchedulerWorkers) AddressAdded(address string) { + ctx := f.ServiceContext() + if ctx == nil || ctx.Err() != nil { + return + } + + f.mu.Lock() + defer f.mu.Unlock() + + // We already have worker for this scheduler. + if w := f.workers[address]; w != nil { + return + } + + level.Debug(f.log).Log("msg", "adding connection to scheduler", "addr", address) + conn, err := f.connectToScheduler(context.Background(), address) + if err != nil { + level.Error(f.log).Log("msg", "error connecting to scheduler", "addr", address, "err", err) + return + } + + // If not, start a new one. + f.workers[address] = newQuerierSchedulerWorker(ctx, conn, address, f.requestHandler, f.cfg.GRPCClientConfig.GRPC.MaxSendMsgSize, f.cfg.QuerierID, f.frontendPool, f.log) + f.resetConcurrency() // Called with lock. +} + +// Requires lock. +func (f *querierSchedulerWorkers) resetConcurrency() { + totalConcurrency := 0 + index := 0 + for _, w := range f.workers { + concurrency := 0 + + if f.cfg.MatchMaxConcurrency { + concurrency = f.cfg.MaxConcurrentRequests / len(f.workers) + + // If max concurrency does not evenly divide into our frontends a subset will be chosen + // to receive an extra connection. Frontend addresses were shuffled above so this will be a + // random selection of frontends. + if index < f.cfg.MaxConcurrentRequests%len(f.workers) { + concurrency++ + } + + // If concurrentRequests is 0 then MaxConcurrentRequests is less than the total number of + // schedulers. 
In order to prevent accidentally starving a scheduler we are just going to + // always connect once to every scheduler. This is dangerous b/c we may start exceeding PromQL + // max concurrency. + if concurrency == 0 { + concurrency = 1 + } + } else { + concurrency = f.cfg.Parallelism + } + + totalConcurrency += concurrency + w.concurrency(concurrency) + index++ + } + + if totalConcurrency > f.cfg.MaxConcurrentRequests { + level.Warn(f.log).Log("msg", "total worker concurrency is greater than promql max concurrency. queries may be queued in the querier which reduces QOS") + } +} + +func (f *querierSchedulerWorkers) AddressRemoved(address string) { + level.Debug(f.log).Log("msg", "removing connection to scheduler", "addr", address) + + f.mu.Lock() + w := f.workers[address] + delete(f.workers, address) + f.mu.Unlock() + + if w != nil { + w.stop() + } +} + +func (f *querierSchedulerWorkers) createFrontendClient(addr string) (client.PoolClient, error) { + opts, err := f.cfg.GRPCClientConfig.DialOption([]grpc.UnaryClientInterceptor{ + otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), + middleware.ClientUserHeaderInterceptor, + cortex_middleware.PrometheusGRPCUnaryInstrumentation(f.frontendClientRequestDuration), + }, nil) + + if err != nil { + return nil, err + } + + conn, err := grpc.Dial(addr, opts...) + if err != nil { + return nil, err + } + + return &frontendClient{ + FrontendForQuerierClient: NewFrontendForQuerierClient(conn), + HealthClient: grpc_health_v1.NewHealthClient(conn), + conn: conn, + }, nil +} + +func (f *querierSchedulerWorkers) connectToScheduler(ctx context.Context, address string) (*grpc.ClientConn, error) { + // Because we only use single long-running method, it doesn't make sense to inect user ID, send over tracing or add metrics. + opts, err := f.cfg.GRPCClientConfig.DialOption(nil, nil) + if err != nil { + return nil, err + } + + conn, err := grpc.DialContext(ctx, address, opts...) + if err != nil { + return nil, err + } + return conn, nil +} + +// Worker manages connection to single scheduler, and runs multiple goroutines for handling PromQL requests. +type querierSchedulerWorker struct { + log log.Logger + + schedulerAddress string + handler RequestHandler + maxMessageSize int + querierID string + + // Main context to control all goroutines. + ctx context.Context + wg sync.WaitGroup + + conn *grpc.ClientConn + frontendPool *client.Pool + + // Cancel functions for individual goroutines. + cancelsMu sync.Mutex + cancels []context.CancelFunc +} + +func newQuerierSchedulerWorker(ctx context.Context, conn *grpc.ClientConn, schedulerAddr string, requestHandler RequestHandler, maxMessageSize int, querierID string, frontendPool *client.Pool, log log.Logger) *querierSchedulerWorker { + w := &querierSchedulerWorker{ + schedulerAddress: schedulerAddr, + conn: conn, + handler: requestHandler, + maxMessageSize: maxMessageSize, + querierID: querierID, + + ctx: ctx, + log: log, + frontendPool: frontendPool, + } + return w +} + +func (w *querierSchedulerWorker) stop() { + w.concurrency(0) + + // And wait until they finish. 
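The split computed by `resetConcurrency` above is plain integer division, with the remainder spread over the first few schedulers and every scheduler clamped to at least one connection. A hedged, standalone sketch of that arithmetic (hypothetical helper, not the patch code):

```go
// Hypothetical sketch of splitting a querier's max concurrency across schedulers:
// e.g. 10 concurrent requests over 3 schedulers -> 4, 3, 3;
// 2 concurrent requests over 3 schedulers -> 1, 1, 1 (clamped, may exceed the PromQL limit).
package main

import "fmt"

func splitConcurrency(maxConcurrent, schedulers int) []int {
	out := make([]int, schedulers)
	for i := range out {
		c := maxConcurrent / schedulers
		if i < maxConcurrent%schedulers {
			c++ // the first "remainder" schedulers get one extra connection
		}
		if c == 0 {
			c = 1 // never starve a scheduler completely
		}
		out[i] = c
	}
	return out
}

func main() {
	fmt.Println(splitConcurrency(10, 3)) // [4 3 3]
	fmt.Println(splitConcurrency(2, 3))  // [1 1 1], total now exceeds maxConcurrent
}
```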
+ w.wg.Wait() +} + +func (w *querierSchedulerWorker) concurrency(n int) { + w.cancelsMu.Lock() + defer w.cancelsMu.Unlock() + + if n < 0 { + n = 0 + } + + for len(w.cancels) < n { + ctx, cancel := context.WithCancel(w.ctx) + w.cancels = append(w.cancels, cancel) + + w.wg.Add(1) + go w.runOne(ctx) + } + + for len(w.cancels) > n { + w.cancels[0]() + w.cancels = w.cancels[1:] + } +} + +// runOne loops, trying to establish a stream to the frontend to begin +// request processing. +func (w *querierSchedulerWorker) runOne(ctx context.Context) { + defer w.wg.Done() + + schedulerClient := NewSchedulerForQuerierClient(w.conn) + + backoffConfig := util.BackoffConfig{ + MinBackoff: 50 * time.Millisecond, + MaxBackoff: 1 * time.Second, + } + + backoff := util.NewBackoff(ctx, backoffConfig) + for backoff.Ongoing() { + c, err := schedulerClient.QuerierLoop(ctx) + if err == nil { + err = c.Send(&QuerierToScheduler{QuerierID: w.querierID}) + } + + if err != nil { + level.Error(w.log).Log("msg", "error contacting scheduler", "err", err, "addr", w.schedulerAddress) + backoff.Wait() + continue + } + + if err := w.querierLoop(c); err != nil { + level.Error(w.log).Log("msg", "error processing requests from scheduler", "err", err, "addr", w.schedulerAddress) + backoff.Wait() + continue + } + + backoff.Reset() + } +} + +// process loops processing requests on an established stream. +func (w *querierSchedulerWorker) querierLoop(c SchedulerForQuerier_QuerierLoopClient) error { + // Build a child context so we can cancel a query when the stream is closed. + ctx, cancel := context.WithCancel(c.Context()) + defer cancel() + + for { + request, err := c.Recv() + if err != nil { + return err + } + + // Handle the request on a "background" goroutine, so we go back to + // blocking on c.Recv(). This allows us to detect the stream closing + // and cancel the query. We don't actually handle queries in parallel + // here, as we're running in lock step with the server - each Recv is + // paired with a Send. + go func() { + // We need to inject user into context for sending response back. + ctx := user.InjectOrgID(ctx, request.UserID) + + tracer := opentracing.GlobalTracer() + // Ignore errors here. If we cannot get parent span, we just don't create new one. + parentSpanContext, _ := getParentSpanForRequest(tracer, request.HttpRequest) + if parentSpanContext != nil { + queueSpan, spanCtx := opentracing.StartSpanFromContextWithTracer(ctx, tracer, "querier_worker_runRequest", opentracing.ChildOf(parentSpanContext)) + defer queueSpan.Finish() + + ctx = spanCtx + } + logger := util.WithContext(ctx, w.log) + + w.runRequest(ctx, logger, request.QueryID, request.FrontendAddress, request.HttpRequest) + + // Report back to scheduler that processing of the query has finished. + if err := c.Send(&QuerierToScheduler{}); err != nil { + level.Error(logger).Log("msg", "error notifying scheduler about finished query", "err", err, "addr", w.schedulerAddress) + } + }() + } +} + +func (w *querierSchedulerWorker) runRequest(ctx context.Context, logger log.Logger, queryID uint64, frontendAddress string, request *httpgrpc.HTTPRequest) { + response, err := w.handler.Handle(ctx, request) + if err != nil { + var ok bool + response, ok = httpgrpc.HTTPResponseFromError(err) + if !ok { + response = &httpgrpc.HTTPResponse{ + Code: http.StatusInternalServerError, + Body: []byte(err.Error()), + } + } + } + + // Ensure responses that are too big are not retried. 
+ if len(response.Body) >= w.maxMessageSize { + level.Error(logger).Log("msg", "response larger than max message size", "size", len(response.Body), "maxMessageSize", w.maxMessageSize) + + errMsg := fmt.Sprintf("response larger than the max message size (%d vs %d)", len(response.Body), w.maxMessageSize) + response = &httpgrpc.HTTPResponse{ + Code: http.StatusRequestEntityTooLarge, + Body: []byte(errMsg), + } + } + + c, err := w.frontendPool.GetClientFor(frontendAddress) + if err == nil { + // Response is empty and uninteresting. + _, err = c.(FrontendForQuerierClient).QueryResult(ctx, &QueryResultRequest{ + QueryID: queryID, + HttpResponse: response, + }) + } + if err != nil { + level.Error(logger).Log("msg", "error notifying frontend about finished query", "err", err, "frontend", frontendAddress) + } +} + +type frontendClient struct { + FrontendForQuerierClient + grpc_health_v1.HealthClient + conn *grpc.ClientConn +} + +func (fc *frontendClient) Close() error { + return fc.conn.Close() +} diff --git a/pkg/querier/frontend2/scheduler.go b/pkg/querier/frontend2/scheduler.go new file mode 100644 index 00000000000..024a55ebd37 --- /dev/null +++ b/pkg/querier/frontend2/scheduler.go @@ -0,0 +1,510 @@ +package frontend2 + +import ( + "context" + "errors" + "flag" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/httpgrpc" + "go.uber.org/atomic" + + "github.com/cortexproject/cortex/pkg/util/services" +) + +var ( + errTooManyRequests = errors.New("too many outstanding requests") + errSchedulerIsNotRunning = errors.New("scheduler is not running") +) + +// Scheduler is responsible for queueing and dispatching queries to Queriers. +type Scheduler struct { + services.Service + + log log.Logger + + limits Limits + + connectedFrontendsMu sync.Mutex + connectedFrontends map[string]*connectedFrontend + + connectedQuerierWorkers *atomic.Int32 + + mtx sync.Mutex + cond *sync.Cond // Notified when request is enqueued or dequeued, or querier is disconnected. + queues *queues + pendingRequests map[requestKey]*schedulerRequest // Request is kept in this map even after being dispatched to querier. It can still be canceled at that time. + + // Metrics. + connectedWorkers prometheus.GaugeFunc + queueDuration prometheus.Histogram + queueLength *prometheus.GaugeVec +} + +type requestKey struct { + frontendAddr string + queryID uint64 +} + +type connectedFrontend struct { + connections int + + // This context is used for running all queries from the same frontend. + // When last frontend connection is closed, context is canceled. + ctx context.Context + cancel context.CancelFunc +} + +type SchedulerConfig struct { + MaxOutstandingPerTenant int +} + +func (cfg *SchedulerConfig) RegisterFlags(f *flag.FlagSet) { + f.IntVar(&cfg.MaxOutstandingPerTenant, "query-scheduler.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per query-scheduler; requests beyond this error with HTTP 429.") +} + +// NewScheduler creates a new Scheduler. 
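+//
+// A minimal wiring sketch, mirroring the package tests (the grpcServer, limits, logger, registerer and ctx
+// names here are assumptions, not part of this change):
+//
+//	s, err := NewScheduler(SchedulerConfig{MaxOutstandingPerTenant: 100}, limits, logger, registerer)
+//	if err != nil {
+//		return err
+//	}
+//	RegisterSchedulerForFrontendServer(grpcServer, s)
+//	RegisterSchedulerForQuerierServer(grpcServer, s)
+//	err = services.StartAndAwaitRunning(ctx, s)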
+func NewScheduler(cfg SchedulerConfig, limits Limits, log log.Logger, registerer prometheus.Registerer) (*Scheduler, error) { + connectedQuerierWorkers := atomic.NewInt32(0) + s := &Scheduler{ + log: log, + limits: limits, + + queues: newUserQueues(cfg.MaxOutstandingPerTenant), + pendingRequests: map[requestKey]*schedulerRequest{}, + + queueDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_query_scheduler_queue_duration_seconds", + Help: "Time spend by requests queued.", + Buckets: prometheus.DefBuckets, + }), + connectedWorkers: promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "cortex_query_scheduler_connected_workers", + Help: "Number of worker clients currently connected to the frontend.", + }, func() float64 { return float64(connectedQuerierWorkers.Load()) }), + queueLength: promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_query_scheduler_queue_length", + Help: "Number of queries in the queue.", + }, []string{"user"}), + + connectedFrontends: map[string]*connectedFrontend{}, + connectedQuerierWorkers: connectedQuerierWorkers, + } + s.cond = sync.NewCond(&s.mtx) + + s.Service = services.NewIdleService(nil, s.stopping) + return s, nil +} + +// Used to transfer trace information from/to HTTP request. +type httpgrpcHeadersCarrier httpgrpc.HTTPRequest + +func (c *httpgrpcHeadersCarrier) Set(key, val string) { + c.Headers = append(c.Headers, &httpgrpc.Header{ + Key: key, + Values: []string{val}, + }) +} + +func (c *httpgrpcHeadersCarrier) ForeachKey(handler func(key, val string) error) error { + for _, h := range c.Headers { + for _, v := range h.Values { + if err := handler(h.Key, v); err != nil { + return err + } + } + } + return nil +} + +func getParentSpanForRequest(tracer opentracing.Tracer, req *httpgrpc.HTTPRequest) (opentracing.SpanContext, error) { + if tracer == nil { + return nil, nil + } + + carrier := (*httpgrpcHeadersCarrier)(req) + extracted, err := tracer.Extract(opentracing.HTTPHeaders, carrier) + if err == opentracing.ErrSpanContextNotFound { + err = nil + } + return extracted, err +} + +// Limits needed for the Query Frontend - interface used for decoupling. +type Limits interface { + // Returns max queriers to use per tenant, or 0 if shuffle sharding is disabled. + MaxQueriersPerUser(user string) int +} + +type schedulerRequest struct { + frontendAddress string + userID string + queryID uint64 + request *httpgrpc.HTTPRequest + + enqueueTime time.Time + + ctx context.Context + ctxCancel context.CancelFunc + queueSpan opentracing.Span + + // This is only used for testing. + parentSpanContext opentracing.SpanContext +} + +// This method handles connection from frontend. +func (s *Scheduler) FrontendLoop(frontend SchedulerForFrontend_FrontendLoopServer) error { + frontendAddress, frontendCtx, err := s.frontendConnected(frontend) + if err != nil { + return err + } + defer s.frontendDisconnected(frontendAddress) + + // We stop accepting new queries in Stopping state. By returning quickly, we disconnect frontends, which in turns + // cancels all their queries. + for s.State() == services.Running { + msg, err := frontend.Recv() + if err != nil { + return err + } + + if s.State() != services.Running { + break // break out of the loop, and send SHUTTING_DOWN message. 
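+			// (The state may have changed to Stopping while this goroutine was blocked in Recv(), hence the re-check.)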
+ } + + var resp *SchedulerToFrontend + + switch msg.GetType() { + case ENQUEUE: + err = s.enqueueRequest(frontendCtx, frontendAddress, msg) + switch { + case err == nil: + resp = &SchedulerToFrontend{Status: OK} + case err == errTooManyRequests: + resp = &SchedulerToFrontend{Status: TOO_MANY_REQUESTS_PER_TENANT} + default: + resp = &SchedulerToFrontend{Status: ERROR, Error: err.Error()} + } + + case CANCEL: + s.cancelRequest(frontendAddress, msg.QueryID) + resp = &SchedulerToFrontend{Status: OK} + + default: + level.Error(s.log).Log("msg", "unknown request type from frontend", "addr", frontendAddress, "type", msg.GetType()) + return errors.New("unknown request type") + } + + err = frontend.Send(resp) + // Failure to send response results in ending this connection. + if err != nil { + return err + } + } + + // Report shutdown back to frontend, so that it can retry with different scheduler. Also stop the frontend loop. + return frontend.Send(&SchedulerToFrontend{Status: SHUTTING_DOWN}) +} + +func (s *Scheduler) frontendConnected(frontend SchedulerForFrontend_FrontendLoopServer) (string, context.Context, error) { + msg, err := frontend.Recv() + if err != nil { + return "", nil, err + } + if msg.Type != INIT || msg.FrontendAddress == "" { + return "", nil, errors.New("no frontend address") + } + + s.connectedFrontendsMu.Lock() + defer s.connectedFrontendsMu.Unlock() + + cf := s.connectedFrontends[msg.FrontendAddress] + if cf == nil { + cf = &connectedFrontend{ + connections: 0, + } + cf.ctx, cf.cancel = context.WithCancel(context.Background()) + s.connectedFrontends[msg.FrontendAddress] = cf + } + + cf.connections++ + return msg.FrontendAddress, cf.ctx, nil +} + +func (s *Scheduler) frontendDisconnected(frontendAddress string) { + s.connectedFrontendsMu.Lock() + defer s.connectedFrontendsMu.Unlock() + + cf := s.connectedFrontends[frontendAddress] + cf.connections-- + if cf.connections == 0 { + delete(s.connectedFrontends, frontendAddress) + cf.cancel() + } +} + +func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr string, msg *FrontendToScheduler) error { + // Create new context for this request, to support cancellation. + ctx, cancel := context.WithCancel(frontendContext) + shouldCancel := true + defer func() { + if shouldCancel { + cancel() + } + }() + + // Extract tracing information from headers in HTTP request. FrontendContext doesn't have the correct tracing + // information, since that is a long-running request. + tracer := opentracing.GlobalTracer() + parentSpanContext, err := getParentSpanForRequest(tracer, msg.HttpRequest) + if err != nil { + return err + } + + userID := msg.GetUserID() + + req := &schedulerRequest{ + frontendAddress: frontendAddr, + userID: msg.UserID, + queryID: msg.QueryID, + request: msg.HttpRequest, + } + + req.parentSpanContext = parentSpanContext + req.queueSpan, req.ctx = opentracing.StartSpanFromContextWithTracer(ctx, tracer, "queued", opentracing.ChildOf(parentSpanContext)) + req.enqueueTime = time.Now() + req.ctxCancel = cancel + + maxQueriers := s.limits.MaxQueriersPerUser(userID) + + s.mtx.Lock() + defer s.mtx.Unlock() + + queue := s.queues.getOrAddQueue(userID, maxQueriers) + if queue == nil { + // This can only happen if userID is "". 
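+		// getOrAddQueue returns nil only for an empty tenant ID, so surface this as an error rather than panicking.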
+ return errors.New("no queue found") + } + + select { + case queue <- req: + shouldCancel = false + s.pendingRequests[requestKey{frontendAddr: frontendAddr, queryID: msg.QueryID}] = req + s.queueLength.WithLabelValues(userID).Inc() + s.cond.Broadcast() + return nil + default: + return errTooManyRequests + } +} + +// This method doesn't do removal from the queue. That will be handled later by getNextRequestForQuerier when it finds +// this request with canceled context. +func (s *Scheduler) cancelRequest(frontendAddr string, queryID uint64) { + s.mtx.Lock() + defer s.mtx.Unlock() + + key := requestKey{frontendAddr: frontendAddr, queryID: queryID} + req := s.pendingRequests[key] + if req != nil { + req.ctxCancel() + } + delete(s.pendingRequests, key) +} + +// QuerierLoop is started by querier to receive queries from scheduler. +func (s *Scheduler) QuerierLoop(querier SchedulerForQuerier_QuerierLoopServer) error { + resp, err := querier.Recv() + if err != nil { + return err + } + + querierID := resp.GetQuerierID() + + s.registerQuerierConnection(querierID) + defer s.unregisterQuerierConnection(querierID) + + // If the downstream connection to querier is cancelled, + // we need to ping the condition variable to unblock getNextRequestForQuerier. + // Ideally we'd have ctx aware condition variables... + go func() { + <-querier.Context().Done() + s.cond.Broadcast() + }() + + lastUserIndex := -1 + + // In stopping state scheduler is not accepting new queries, but still dispatching queries in the queues. + for s.isRunningOrStopping() { + req, idx, err := s.getNextRequestForQuerier(querier.Context(), lastUserIndex, querierID) + if err != nil { + return err + } + lastUserIndex = idx + + if err := s.forwardRequestToQuerier(querier, req); err != nil { + return err + } + } + + return errSchedulerIsNotRunning +} + +func (s *Scheduler) forwardRequestToQuerier(querier SchedulerForQuerier_QuerierLoopServer, req *schedulerRequest) error { + // Make sure to cancel request at the end to cleanup resources. + defer s.cancelRequest(req.frontendAddress, req.queryID) + + // Handle the stream sending & receiving on a goroutine so we can + // monitoring the contexts in a select and cancel things appropriately. + errCh := make(chan error, 1) + go func() { + err := querier.Send(&SchedulerToQuerier{ + UserID: req.userID, + QueryID: req.queryID, + FrontendAddress: req.frontendAddress, + HttpRequest: req.request, + }) + if err != nil { + errCh <- err + return + } + + _, err = querier.Recv() + errCh <- err + }() + + select { + case <-req.ctx.Done(): + // If the upstream request is cancelled (eg. frontend issued CANCEL or closed connection), + // we need to cancel the downstream req. Only way we can do that is to close the stream (by returning error here). + // Querier is expecting this semantics. + return req.ctx.Err() + + case err := <-errCh: + // Is there was an error handling this request due to network IO, + // then error out this upstream request _and_ stream. + // TODO: if err is not nil, scheduler should notify frontend using the frontend address. + return err + } +} + +// getQueue picks a random queue and takes the next unexpired request off of it, so we +// fairly process users queries. Will block if there are no requests. 
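+// Only queues whose tenant shard (as limited by Limits.MaxQueriersPerUser) includes the given querierID are considered.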
+func (s *Scheduler) getNextRequestForQuerier(ctx context.Context, lastUserIndex int, querierID string) (*schedulerRequest, int, error) { + s.mtx.Lock() + defer s.mtx.Unlock() + + querierWait := false + +FindQueue: + // We need to wait if there are no users, or no pending requests for given querier. + for (s.queues.len() == 0 || querierWait) && ctx.Err() == nil && s.isRunningOrStopping() { + querierWait = false + s.cond.Wait() + } + + if err := ctx.Err(); err != nil { + return nil, lastUserIndex, err + } + + if !s.isRunningOrStopping() { + return nil, lastUserIndex, errSchedulerIsNotRunning + } + + for { + queue, userID, idx := s.queues.getNextQueueForQuerier(lastUserIndex, querierID) + lastUserIndex = idx + if queue == nil { + break + } + /* + We want to dequeue the next unexpired request from the chosen tenant queue. + The chance of choosing a particular tenant for dequeueing is (1/active_tenants). + This is problematic under load, especially with other middleware enabled such as + querier.split-by-interval, where one request may fan out into many. + If expired requests aren't exhausted before checking another tenant, it would take + n_active_tenants * n_expired_requests_at_front_of_queue requests being processed + before an active request was handled for the tenant in question. + If this tenant meanwhile continued to queue requests, + it's possible that it's own queue would perpetually contain only expired requests. + */ + + // Pick the first non-expired request from this user's queue (if any). + for { + lastRequest := false + request := <-queue + if len(queue) == 0 { + s.queues.deleteQueue(userID) + lastRequest = true + } + + // Tell close() we've processed a request. + s.cond.Broadcast() + + s.queueDuration.Observe(time.Since(request.enqueueTime).Seconds()) + s.queueLength.WithLabelValues(userID).Dec() + request.queueSpan.Finish() + + // Ensure the request has not already expired. + if request.ctx.Err() == nil { + return request, lastUserIndex, nil + } + + // Make sure cancel is called for all requests. + request.ctxCancel() + delete(s.pendingRequests, requestKey{request.frontendAddress, request.queryID}) + + // Stop iterating on this queue if we've just consumed the last request. + if lastRequest { + break + } + } + } + + // There are no unexpired requests, so we can get back + // and wait for more requests. + querierWait = true + goto FindQueue +} + +func (s *Scheduler) isRunningOrStopping() bool { + st := s.State() + return st == services.Running || st == services.Stopping +} + +// Close the Scheduler. +func (s *Scheduler) stopping(_ error) error { + s.mtx.Lock() + defer s.mtx.Unlock() + + for s.queues.len() > 0 && s.connectedQuerierWorkers.Load() > 0 { + s.cond.Wait() + } + + // If there are still queriers waiting for requests, they get notified. + // (They would also be notified if gRPC server shuts down). 
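+	// This wakes QuerierLoop goroutines blocked in getNextRequestForQuerier, so they can observe the Stopped state and return.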
+ s.cond.Broadcast() + return nil +} + +func (s *Scheduler) registerQuerierConnection(querier string) { + s.connectedQuerierWorkers.Inc() + + s.mtx.Lock() + defer s.mtx.Unlock() + s.queues.addQuerierConnection(querier) +} + +func (s *Scheduler) unregisterQuerierConnection(querier string) { + s.connectedQuerierWorkers.Dec() + + s.mtx.Lock() + defer s.mtx.Unlock() + s.queues.removeQuerierConnection(querier) +} diff --git a/pkg/querier/frontend2/scheduler_test.go b/pkg/querier/frontend2/scheduler_test.go new file mode 100644 index 00000000000..f590f3bc3f7 --- /dev/null +++ b/pkg/querier/frontend2/scheduler_test.go @@ -0,0 +1,379 @@ +package frontend2 + +import ( + "context" + "fmt" + "net" + "testing" + "time" + + "github.com/go-kit/kit/log" + "github.com/opentracing/opentracing-go" + "github.com/stretchr/testify/require" + "github.com/uber/jaeger-client-go/config" + "github.com/weaveworks/common/httpgrpc" + "google.golang.org/grpc" + + "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/test" +) + +const testMaxOutstandingPerTenant = 5 + +func setupScheduler(t *testing.T) (*Scheduler, SchedulerForFrontendClient, SchedulerForQuerierClient) { + s, err := NewScheduler(SchedulerConfig{MaxOutstandingPerTenant: testMaxOutstandingPerTenant}, &limits{queriers: 2}, log.NewNopLogger(), nil) + require.NoError(t, err) + + server := grpc.NewServer() + RegisterSchedulerForFrontendServer(server, s) + RegisterSchedulerForQuerierServer(server, s) + + require.NoError(t, services.StartAndAwaitRunning(context.Background(), s)) + t.Cleanup(func() { + _ = services.StopAndAwaitTerminated(context.Background(), s) + }) + + l, err := net.Listen("tcp", "") + require.NoError(t, err) + + go func() { + _ = server.Serve(l) + }() + + t.Cleanup(func() { + _ = l.Close() + }) + + c, err := grpc.Dial(l.Addr().String(), grpc.WithInsecure()) + require.NoError(t, err) + + t.Cleanup(func() { + _ = c.Close() + }) + + return s, NewSchedulerForFrontendClient(c), NewSchedulerForQuerierClient(c) +} + +func TestSchedulerBasicEnqueue(t *testing.T) { + scheduler, frontendClient, querierClient := setupScheduler(t) + + frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") + frontendToScheduler(t, frontendLoop, &FrontendToScheduler{ + Type: ENQUEUE, + QueryID: 1, + UserID: "test", + HttpRequest: &httpgrpc.HTTPRequest{Method: "GET", Url: "/hello"}, + }) + + { + querierLoop, err := querierClient.QuerierLoop(context.Background()) + require.NoError(t, err) + require.NoError(t, querierLoop.Send(&QuerierToScheduler{QuerierID: "querier-1"})) + + msg2, err := querierLoop.Recv() + require.NoError(t, err) + require.Equal(t, uint64(1), msg2.QueryID) + require.Equal(t, "frontend-12345", msg2.FrontendAddress) + require.Equal(t, "GET", msg2.HttpRequest.Method) + require.Equal(t, "/hello", msg2.HttpRequest.Url) + require.NoError(t, querierLoop.Send(&QuerierToScheduler{})) + } + + verifyNoPendingRequestsLeft(t, scheduler) +} + +func TestSchedulerEnqueueWithCancel(t *testing.T) { + scheduler, frontendClient, querierClient := setupScheduler(t) + + frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") + frontendToScheduler(t, frontendLoop, &FrontendToScheduler{ + Type: ENQUEUE, + QueryID: 1, + UserID: "test", + HttpRequest: &httpgrpc.HTTPRequest{Method: "GET", Url: "/hello"}, + }) + + frontendToScheduler(t, frontendLoop, &FrontendToScheduler{ + Type: CANCEL, + QueryID: 1, + }) + + querierLoop := initQuerierLoop(t, querierClient, "querier-1") + + 
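+	// Cancellation only marks the request's context as done; the scheduler drops such requests lazily when dequeuing,
+	// so the querier must never receive this one.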
verifyQuerierDoesntReceiveRequest(t, querierLoop, 500*time.Millisecond) + verifyNoPendingRequestsLeft(t, scheduler) +} + +func initQuerierLoop(t *testing.T, querierClient SchedulerForQuerierClient, querier string) SchedulerForQuerier_QuerierLoopClient { + querierLoop, err := querierClient.QuerierLoop(context.Background()) + require.NoError(t, err) + require.NoError(t, querierLoop.Send(&QuerierToScheduler{QuerierID: querier})) + + return querierLoop +} + +func TestSchedulerEnqueueByMultipleFrontendsWithCancel(t *testing.T) { + scheduler, frontendClient, querierClient := setupScheduler(t) + + frontendLoop1 := initFrontendLoop(t, frontendClient, "frontend-1") + frontendLoop2 := initFrontendLoop(t, frontendClient, "frontend-2") + + frontendToScheduler(t, frontendLoop1, &FrontendToScheduler{ + Type: ENQUEUE, + QueryID: 1, + UserID: "test", + HttpRequest: &httpgrpc.HTTPRequest{Method: "GET", Url: "/hello1"}, + }) + + frontendToScheduler(t, frontendLoop2, &FrontendToScheduler{ + Type: ENQUEUE, + QueryID: 1, + UserID: "test", + HttpRequest: &httpgrpc.HTTPRequest{Method: "GET", Url: "/hello2"}, + }) + + // Cancel first query by first frontend. + frontendToScheduler(t, frontendLoop1, &FrontendToScheduler{ + Type: CANCEL, + QueryID: 1, + }) + + querierLoop := initQuerierLoop(t, querierClient, "querier-1") + + // Let's verify that we can receive query 1 from frontend-2. + msg, err := querierLoop.Recv() + require.NoError(t, err) + require.Equal(t, uint64(1), msg.QueryID) + require.Equal(t, "frontend-2", msg.FrontendAddress) + // Must notify scheduler back about finished processing, or it will not send more requests (nor remove "current" request from pending ones). + require.NoError(t, querierLoop.Send(&QuerierToScheduler{})) + + // But nothing else. + verifyQuerierDoesntReceiveRequest(t, querierLoop, 500*time.Millisecond) + verifyNoPendingRequestsLeft(t, scheduler) +} + +func TestSchedulerEnqueueWithFrontendDisconnect(t *testing.T) { + scheduler, frontendClient, querierClient := setupScheduler(t) + + frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") + frontendToScheduler(t, frontendLoop, &FrontendToScheduler{ + Type: ENQUEUE, + QueryID: 1, + UserID: "test", + HttpRequest: &httpgrpc.HTTPRequest{Method: "GET", Url: "/hello"}, + }) + + querierLoop := initQuerierLoop(t, querierClient, "querier-1") + + // Disconnect frontend. + require.NoError(t, frontendLoop.CloseSend()) + + verifyQuerierDoesntReceiveRequest(t, querierLoop, 500*time.Millisecond) + verifyNoPendingRequestsLeft(t, scheduler) +} + +func TestCancelRequestInProgress(t *testing.T) { + scheduler, frontendClient, querierClient := setupScheduler(t) + + frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") + frontendToScheduler(t, frontendLoop, &FrontendToScheduler{ + Type: ENQUEUE, + QueryID: 1, + UserID: "test", + HttpRequest: &httpgrpc.HTTPRequest{Method: "GET", Url: "/hello"}, + }) + + querierLoop, err := querierClient.QuerierLoop(context.Background()) + require.NoError(t, err) + require.NoError(t, querierLoop.Send(&QuerierToScheduler{QuerierID: "querier-1"})) + + _, err = querierLoop.Recv() + require.NoError(t, err) + + // At this point, scheduler assumes that querier is processing the request (until it receives empty QuerierToScheduler message back). + // Simulate frontend disconnect. + require.NoError(t, frontendLoop.CloseSend()) + + // Add a little sleep to make sure that scheduler notices frontend disconnect. + time.Sleep(500 * time.Millisecond) + + // Report back end of request processing. 
This should return error, since the QuerierLoop call has finished on scheduler. + // Note: testing on querierLoop.Context() cancellation didn't work :( + err = querierLoop.Send(&QuerierToScheduler{}) + require.Error(t, err) + + verifyNoPendingRequestsLeft(t, scheduler) +} + +func TestTracingContext(t *testing.T) { + scheduler, frontendClient, _ := setupScheduler(t) + + frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") + + closer, err := config.Configuration{}.InitGlobalTracer("test") + require.NoError(t, err) + defer closer.Close() + + req := &FrontendToScheduler{ + Type: ENQUEUE, + QueryID: 1, + UserID: "test", + HttpRequest: &httpgrpc.HTTPRequest{Method: "GET", Url: "/hello"}, + FrontendAddress: "frontend-12345", + } + + sp, _ := opentracing.StartSpanFromContext(context.Background(), "client") + opentracing.GlobalTracer().Inject(sp.Context(), opentracing.HTTPHeaders, (*httpgrpcHeadersCarrier)(req.HttpRequest)) + + frontendToScheduler(t, frontendLoop, req) + + scheduler.mtx.Lock() + defer scheduler.mtx.Unlock() + require.Equal(t, 1, len(scheduler.pendingRequests)) + + for _, r := range scheduler.pendingRequests { + require.NotNil(t, r.parentSpanContext) + } +} + +func TestSchedulerShutdown_FrontendLoop(t *testing.T) { + scheduler, frontendClient, _ := setupScheduler(t) + + frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") + + // Stop the scheduler. This will disable receiving new requests from frontends. + scheduler.StopAsync() + + // We can still send request to scheduler, but we get shutdown error back. + require.NoError(t, frontendLoop.Send(&FrontendToScheduler{ + Type: ENQUEUE, + QueryID: 1, + UserID: "test", + HttpRequest: &httpgrpc.HTTPRequest{Method: "GET", Url: "/hello"}, + })) + + msg, err := frontendLoop.Recv() + require.NoError(t, err) + require.True(t, msg.Status == SHUTTING_DOWN) +} + +func TestSchedulerShutdown_QuerierLoop(t *testing.T) { + scheduler, frontendClient, querierClient := setupScheduler(t) + + frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") + frontendToScheduler(t, frontendLoop, &FrontendToScheduler{ + Type: ENQUEUE, + QueryID: 1, + UserID: "test", + HttpRequest: &httpgrpc.HTTPRequest{Method: "GET", Url: "/hello"}, + }) + + // Scheduler now has 1 query. Let's connect querier and fetch it. + + querierLoop, err := querierClient.QuerierLoop(context.Background()) + require.NoError(t, err) + require.NoError(t, querierLoop.Send(&QuerierToScheduler{QuerierID: "querier-1"})) + + // Dequeue first query. + _, err = querierLoop.Recv() + require.NoError(t, err) + + scheduler.StopAsync() + + // Unblock scheduler loop, to find next request. + err = querierLoop.Send(&QuerierToScheduler{}) + require.NoError(t, err) + + // This should now return with error, since scheduler is going down. + _, err = querierLoop.Recv() + require.Error(t, err) +} + +func TestSchedulerMaxOutstandingRequests(t *testing.T) { + _, frontendClient, _ := setupScheduler(t) + + for i := 0; i < testMaxOutstandingPerTenant; i++ { + // coming from different frontends + fl := initFrontendLoop(t, frontendClient, fmt.Sprintf("frontend-%d", i)) + require.NoError(t, fl.Send(&FrontendToScheduler{ + Type: ENQUEUE, + QueryID: uint64(i), + UserID: "test", // for same user. + HttpRequest: &httpgrpc.HTTPRequest{}, + })) + + msg, err := fl.Recv() + require.NoError(t, err) + require.True(t, msg.Status == OK) + } + + // One more query from the same user will trigger an error. 
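+	// The outstanding-requests limit is tracked per tenant, not per frontend, so enqueueing from yet another
+	// frontend for the same user must still be rejected.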
+ fl := initFrontendLoop(t, frontendClient, "extra-frontend") + require.NoError(t, fl.Send(&FrontendToScheduler{ + Type: ENQUEUE, + QueryID: 0, + UserID: "test", + HttpRequest: &httpgrpc.HTTPRequest{Method: "GET", Url: "/hello"}, + })) + + msg, err := fl.Recv() + require.NoError(t, err) + require.True(t, msg.Status == TOO_MANY_REQUESTS_PER_TENANT) +} + +func initFrontendLoop(t *testing.T, client SchedulerForFrontendClient, frontendAddr string) SchedulerForFrontend_FrontendLoopClient { + loop, err := client.FrontendLoop(context.Background()) + require.NoError(t, err) + + require.NoError(t, loop.Send(&FrontendToScheduler{ + Type: INIT, + FrontendAddress: frontendAddr, + })) + + return loop +} + +func frontendToScheduler(t *testing.T, frontendLoop SchedulerForFrontend_FrontendLoopClient, req *FrontendToScheduler) { + require.NoError(t, frontendLoop.Send(req)) + msg, err := frontendLoop.Recv() + require.NoError(t, err) + require.True(t, msg.Status == OK) +} + +// If this verification succeeds, there will be leaked goroutine left behind. It will be cleaned once grpc server is shut down. +func verifyQuerierDoesntReceiveRequest(t *testing.T, querierLoop SchedulerForQuerier_QuerierLoopClient, timeout time.Duration) { + ch := make(chan interface{}, 1) + + go func() { + m, e := querierLoop.Recv() + if e != nil { + ch <- e + } else { + ch <- m + } + }() + + select { + case val := <-ch: + require.Failf(t, "expected timeout", "got %v", val) + case <-time.After(timeout): + return + } +} + +func verifyNoPendingRequestsLeft(t *testing.T, scheduler *Scheduler) { + test.Poll(t, 1*time.Second, 0, func() interface{} { + scheduler.mtx.Lock() + defer scheduler.mtx.Unlock() + return len(scheduler.pendingRequests) + }) +} + +type limits struct { + queriers int +} + +func (l limits) MaxQueriersPerUser(_ string) int { + return l.queriers +} diff --git a/pkg/querier/queryrange/retry.go b/pkg/querier/queryrange/retry.go index 38d3bb15c9d..32d16b92171 100644 --- a/pkg/querier/queryrange/retry.go +++ b/pkg/querier/queryrange/retry.go @@ -8,6 +8,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/weaveworks/common/httpgrpc" + + "github.com/cortexproject/cortex/pkg/util" ) type RetryMiddlewareMetrics struct { @@ -68,7 +70,7 @@ func (r retry) Do(ctx context.Context, req Request) (Response, error) { httpResp, ok := httpgrpc.HTTPResponseFromError(err) if !ok || httpResp.Code/100 == 5 { lastErr = err - level.Error(r.log).Log("msg", "error processing request", "try", tries, "err", err) + level.Error(util.WithContext(ctx, r.log)).Log("msg", "error processing request", "try", tries, "err", err) continue } diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index e890c218455..1ba2b980769 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -34,7 +34,6 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/cache" - "github.com/cortexproject/cortex/pkg/querier/frontend" ) const day = 24 * time.Hour @@ -126,6 +125,17 @@ func MergeMiddlewares(middleware ...Middleware) Middleware { }) } +// Tripperware is a signature for all http client-side middleware. +type Tripperware func(http.RoundTripper) http.RoundTripper + +// RoundTripFunc is to http.RoundTripper what http.HandlerFunc is to http.Handler. 
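+// It allows plain functions to be chained as RoundTrippers when building Tripperware without declaring a named type.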
+type RoundTripFunc func(*http.Request) (*http.Response, error) + +// RoundTrip implements http.RoundTripper. +func (f RoundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return f(r) +} + // NewTripperware returns a Tripperware configured with middlewares to limit, align, split, retry and cache requests. func NewTripperware( cfg Config, @@ -138,7 +148,7 @@ func NewTripperware( minShardingLookback time.Duration, registerer prometheus.Registerer, cacheGenNumberLoader CacheGenNumberLoader, -) (frontend.Tripperware, cache.Cache, error) { +) (Tripperware, cache.Cache, error) { // Per tenant query metrics. queriesPerTenant := promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ Namespace: "cortex", @@ -196,11 +206,11 @@ func NewTripperware( queryRangeMiddleware = append(queryRangeMiddleware, InstrumentMiddleware("retry", metrics), NewRetryMiddleware(log, cfg.MaxRetries, NewRetryMiddlewareMetrics(registerer))) } - return frontend.Tripperware(func(next http.RoundTripper) http.RoundTripper { + return func(next http.RoundTripper) http.RoundTripper { // Finally, if the user selected any query range middleware, stitch it in. if len(queryRangeMiddleware) > 0 { queryrange := NewRoundTripper(next, codec, queryRangeMiddleware...) - return frontend.RoundTripFunc(func(r *http.Request) (*http.Response, error) { + return RoundTripFunc(func(r *http.Request) (*http.Response, error) { isQueryRange := strings.HasSuffix(r.URL.Path, "/query_range") op := "query" if isQueryRange { @@ -221,7 +231,7 @@ func NewTripperware( }) } return next - }), c, nil + }, c, nil } type roundTripper struct { diff --git a/pkg/util/fakeauth/fake_auth.go b/pkg/util/fakeauth/fake_auth.go index 42557e15240..ee850e80451 100644 --- a/pkg/util/fakeauth/fake_auth.go +++ b/pkg/util/fakeauth/fake_auth.go @@ -15,19 +15,27 @@ import ( // SetupAuthMiddleware for the given server config. func SetupAuthMiddleware(config *server.Config, enabled bool, noGRPCAuthOn []string) middleware.Interface { if enabled { - config.GRPCMiddleware = append(config.GRPCMiddleware, - middleware.ServerUserHeaderInterceptor, - ) + ignoredMethods := map[string]bool{} + for _, m := range noGRPCAuthOn { + ignoredMethods[m] = true + } + + config.GRPCMiddleware = append(config.GRPCMiddleware, func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + if ignoredMethods[info.FullMethod] { + return handler(ctx, req) + } + return middleware.ServerUserHeaderInterceptor(ctx, req, info, handler) + }) + config.GRPCStreamMiddleware = append(config.GRPCStreamMiddleware, func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - for _, path := range noGRPCAuthOn { - if info.FullMethod == path { - return handler(srv, ss) - } + if ignoredMethods[info.FullMethod] { + return handler(srv, ss) } return middleware.StreamServerUserHeaderInterceptor(srv, ss, info, handler) }, ) + return middleware.AuthenticateUser } diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index f89aeca6565..73349b2d887 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -125,7 +125,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of queries will be scheduled in parallel by the frontend.") f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries. 
This limit is ignored when running the Cortex blocks storage. 0 to disable.") f.DurationVar(&l.MaxCacheFreshness, "frontend.max-cache-freshness", 1*time.Minute, "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.") - f.IntVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends). This option only works with queriers connecting to the query-frontend, not when using downstream URL.") + f.IntVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.") f.DurationVar(&l.RulerEvaluationDelay, "ruler.evaluation-delay-duration", 0, "Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.") f.IntVar(&l.RulerTenantShardSize, "ruler.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by ruler. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.") diff --git a/tools/doc-generator/main.go b/tools/doc-generator/main.go index 4fc6a60a760..1d7cce1db63 100644 --- a/tools/doc-generator/main.go +++ b/tools/doc-generator/main.go @@ -67,7 +67,7 @@ var ( }, { name: "query_frontend_config", - structType: reflect.TypeOf(frontend.Config{}), + structType: reflect.TypeOf(frontend.CombinedFrontendConfig{}), desc: "The query_frontend_config configures the Cortex query-frontend.", }, { @@ -112,7 +112,7 @@ var ( }, { name: "frontend_worker_config", - structType: reflect.TypeOf(frontend.WorkerConfig{}), + structType: reflect.TypeOf(frontend.CombinedWorkerConfig{}), desc: "The frontend_worker_config configures the worker - running within the Cortex querier - picking up and executing queries enqueued by the query-frontend.", }, { From ce12f7cda2d5c0b37d6cf363a142c3ef8eac1bf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Tue, 27 Oct 2020 13:02:52 +0100 Subject: [PATCH 2/8] Fix roundtripper wrapping. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/cortex/modules.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index 1eb6714d8d2..4d7e229ecfe 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -509,7 +509,7 @@ func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { } // Wrap roundtripper into Tripperware. 
- t.QueryFrontendTripperware(roundTripper) + roundTripper = t.QueryFrontendTripperware(roundTripper) handler := frontend.NewHandler(t.Cfg.Frontend.Handler, roundTripper, util.Logger) if t.Cfg.Frontend.CompressResponses { From 2dc631c72ea31f3948453ed16aefc8692e2de58b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Tue, 27 Oct 2020 21:14:04 +0100 Subject: [PATCH 3/8] Review feedback. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/querier/frontend/handler.go | 1 - pkg/querier/frontend2/frontend2.go | 8 ++++---- pkg/querier/frontend2/frontend_scheduler_worker.go | 2 +- pkg/querier/frontend2/querier_scheduler_worker.go | 2 +- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/pkg/querier/frontend/handler.go b/pkg/querier/frontend/handler.go index 411f71f6920..7b3ed90ba19 100644 --- a/pkg/querier/frontend/handler.go +++ b/pkg/querier/frontend/handler.go @@ -118,7 +118,6 @@ func (f *Handler) reportSlowQuery(queryResponseTime time.Duration, r *http.Reque } level.Info(util.WithContext(r.Context(), f.log)).Log(logMessage...) - } func writeError(w http.ResponseWriter, err error) { diff --git a/pkg/querier/frontend2/frontend2.go b/pkg/querier/frontend2/frontend2.go index b3bfcbb8d8f..b46cc50a3ea 100644 --- a/pkg/querier/frontend2/frontend2.go +++ b/pkg/querier/frontend2/frontend2.go @@ -188,13 +188,13 @@ enqueueAgain: select { case <-ctx.Done(): return nil, ctx.Err() - case er := <-freq.enqueue: - if er.success { - cancelCh = er.cancelCh + case enqRes := <-freq.enqueue: + if enqRes.success { + cancelCh = enqRes.cancelCh break } - if er.retry { + if enqRes.retry { retries-- if retries > 0 { goto enqueueAgain diff --git a/pkg/querier/frontend2/frontend_scheduler_worker.go b/pkg/querier/frontend2/frontend_scheduler_worker.go index cb0095e72ca..bd2837cedb3 100644 --- a/pkg/querier/frontend2/frontend_scheduler_worker.go +++ b/pkg/querier/frontend2/frontend_scheduler_worker.go @@ -124,7 +124,7 @@ func (f *frontendSchedulerWorkers) getWorkersCount() int { } func (f *frontendSchedulerWorkers) connectToScheduler(ctx context.Context, address string) (*grpc.ClientConn, error) { - // Because we only use single long-running method, it doesn't make sense to inect user ID, send over tracing or add metrics. + // Because we only use single long-running method, it doesn't make sense to inject user ID, send over tracing or add metrics. opts, err := f.cfg.GRPCClientConfig.DialOption(nil, nil) if err != nil { return nil, err diff --git a/pkg/querier/frontend2/querier_scheduler_worker.go b/pkg/querier/frontend2/querier_scheduler_worker.go index 9f5ec53c54c..6daf84b1d64 100644 --- a/pkg/querier/frontend2/querier_scheduler_worker.go +++ b/pkg/querier/frontend2/querier_scheduler_worker.go @@ -231,7 +231,7 @@ func (f *querierSchedulerWorkers) createFrontendClient(addr string) (client.Pool } func (f *querierSchedulerWorkers) connectToScheduler(ctx context.Context, address string) (*grpc.ClientConn, error) { - // Because we only use single long-running method, it doesn't make sense to inect user ID, send over tracing or add metrics. + // Because we only use single long-running method, it doesn't make sense to inject user ID, send over tracing or add metrics. 
opts, err := f.cfg.GRPCClientConfig.DialOption(nil, nil) if err != nil { return nil, err From 69a8492df28c9df74b3498ec0fa8112f5c4e3f36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Thu, 29 Oct 2020 23:03:47 +0100 Subject: [PATCH 4/8] Review feedback. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- docs/guides/shuffle-sharding.md | 2 +- pkg/cortex/cortex.go | 14 ++--- pkg/cortex/modules.go | 8 +-- pkg/querier/frontend/config.go | 46 +++++++++------ pkg/querier/frontend/frontend_test.go | 6 +- pkg/querier/frontend2/frontend.proto | 14 +++-- pkg/querier/frontend2/frontend2.go | 59 ++++++++++++------- pkg/querier/frontend2/frontend2_test.go | 2 +- .../frontend2/frontend_scheduler_worker.go | 20 +++---- .../frontend2/querier_scheduler_worker.go | 14 ++--- pkg/querier/frontend2/scheduler.go | 8 +-- .../frontend2 => util}/dns_watcher.go | 4 +- 12 files changed, 115 insertions(+), 82 deletions(-) rename pkg/{querier/frontend2 => util}/dns_watcher.go (93%) diff --git a/docs/guides/shuffle-sharding.md b/docs/guides/shuffle-sharding.md index 7a5e9568d20..b0a110b5e5d 100644 --- a/docs/guides/shuffle-sharding.md +++ b/docs/guides/shuffle-sharding.md @@ -80,7 +80,7 @@ _The shard size can be overridden on a per-tenant basis in the limits overrides By default all Cortex queriers can execute received queries for given tenant. -When shuffle sharding is **enabled** by setting `-frontend.max-queriers-per-tenant` (or its respective YAML config option) to a value higher than 0 and lower than the number of available queriers, only specified number of queriers will execute queries for single tenant. Note that this distribution happens in query-frontend, or query-scheduler if used. When not using query-frontend, this option is not available. +When shuffle sharding is **enabled** by setting `-frontend.max-queriers-per-tenant` (or its respective YAML config option) to a value higher than 0 and lower than the number of available queriers, only specified number of queriers will execute queries for single tenant. Note that this distribution happens in query-frontend, or query-scheduler if used. When using query-scheduler, `-frontend.max-queriers-per-tenant` option must be set for query-scheduler component. When not using query-frontend (with or without scheduler), this option is not available. _The maximum number of queriers can be overridden on a per-tenant basis in the limits overrides configuration._ diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index 29b30899641..c6812343b35 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -100,12 +100,12 @@ type Config struct { StoreGateway storegateway.Config `yaml:"store_gateway"` PurgerConfig purger.Config `yaml:"purger"` - Ruler ruler.Config `yaml:"ruler"` - Configs configs.Config `yaml:"configs"` - Alertmanager alertmanager.MultitenantAlertmanagerConfig `yaml:"alertmanager"` - RuntimeConfig runtimeconfig.ManagerConfig `yaml:"runtime_config"` - MemberlistKV memberlist.KVConfig `yaml:"memberlist"` - QuerySchedulerConfig frontend2.SchedulerConfig `yaml:"query_scheduler"` + Ruler ruler.Config `yaml:"ruler"` + Configs configs.Config `yaml:"configs"` + Alertmanager alertmanager.MultitenantAlertmanagerConfig `yaml:"alertmanager"` + RuntimeConfig runtimeconfig.ManagerConfig `yaml:"runtime_config"` + MemberlistKV memberlist.KVConfig `yaml:"memberlist"` + QueryScheduler frontend2.SchedulerConfig `yaml:"query_scheduler"` } // RegisterFlags registers flag. 
@@ -151,7 +151,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Alertmanager.RegisterFlags(f) c.RuntimeConfig.RegisterFlags(f) c.MemberlistKV.RegisterFlags(f, "") - c.QuerySchedulerConfig.RegisterFlags(f) + c.QueryScheduler.RegisterFlags(f) // These don't seem to have a home. f.IntVar(&chunk_util.QueryParallelism, "querier.query-parallelism", 100, "Max subqueries run in parallel per higher-level query.") diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index 4d7e229ecfe..e90ad2e0e8b 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -284,7 +284,7 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) { } else { // Single binary mode requires a query frontend endpoint for the worker. If no frontend or scheduler endpoint // is configured, Cortex will default to using frontend on localhost on it's own GRPC listening port. - if t.Cfg.Worker.WorkerV1.FrontendAddress == "" || t.Cfg.Worker.WorkerV2.SchedulerAddr == "" { + if t.Cfg.Worker.WorkerV1.FrontendAddress == "" || t.Cfg.Worker.WorkerV2.SchedulerAddress == "" { address := fmt.Sprintf("127.0.0.1:%d", t.Cfg.Server.GRPCListenPort) level.Warn(util.Logger).Log("msg", "Worker address is empty in single binary mode. Attempting automatic worker configuration. If queries are unresponsive consider configuring the worker explicitly.", "address", address) t.Cfg.Worker.WorkerV1.FrontendAddress = address @@ -297,7 +297,7 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) { } // If neither frontend address or scheduler address is configured, no worker will be created. - return t.Cfg.Worker.InitQuerierWorker(t.Cfg.Querier, internalQuerierRouter, util.Logger) + return frontend.InitQuerierWorker(t.Cfg.Worker, t.Cfg.Querier, internalQuerierRouter, util.Logger) } func (t *Cortex) initStoreQueryables() (services.Service, error) { @@ -503,7 +503,7 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro } func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { - roundTripper, frontendV1, frontendV2, err := t.Cfg.Frontend.InitFrontend(t.Overrides, t.Cfg.Server.GRPCListenPort, util.Logger, prometheus.DefaultRegisterer) + roundTripper, frontendV1, frontendV2, err := frontend.InitFrontend(t.Cfg.Frontend, t.Overrides, t.Cfg.Server.GRPCListenPort, util.Logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } @@ -739,7 +739,7 @@ func (t *Cortex) initPurger() (services.Service, error) { } func (t *Cortex) initQueryScheduler() (services.Service, error) { - s, err := frontend2.NewScheduler(t.Cfg.QuerySchedulerConfig, t.Overrides, util.Logger, prometheus.DefaultRegisterer) + s, err := frontend2.NewScheduler(t.Cfg.QueryScheduler, t.Overrides, util.Logger, prometheus.DefaultRegisterer) if err != nil { return nil, errors.Wrap(err, "query-scheduler init") } diff --git a/pkg/querier/frontend/config.go b/pkg/querier/frontend/config.go index 697dc3cad4a..d615d1a82cd 100644 --- a/pkg/querier/frontend/config.go +++ b/pkg/querier/frontend/config.go @@ -18,9 +18,9 @@ import ( // This struct combines several configuration options together to preserve backwards compatibility. 
type CombinedFrontendConfig struct { - Handler HandlerConfig `yaml:",inline"` - Frontend Config `yaml:",inline"` - Frontend2 frontend2.Config `yaml:",inline"` + Handler HandlerConfig `yaml:",inline"` + FrontendV1 Config `yaml:",inline"` + FrontendV2 frontend2.Config `yaml:",inline"` CompressResponses bool `yaml:"compress_responses"` DownstreamURL string `yaml:"downstream_url"` @@ -28,41 +28,47 @@ type CombinedFrontendConfig struct { func (cfg *CombinedFrontendConfig) RegisterFlags(f *flag.FlagSet) { cfg.Handler.RegisterFlags(f) - cfg.Frontend.RegisterFlags(f) - cfg.Frontend2.RegisterFlags(f) + cfg.FrontendV1.RegisterFlags(f) + cfg.FrontendV2.RegisterFlags(f) f.BoolVar(&cfg.CompressResponses, "querier.compress-http-responses", false, "Compress HTTP responses.") f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Prometheus.") } -func (cfg *CombinedFrontendConfig) InitFrontend(limits Limits, grpcListenPort int, log log.Logger, reg prometheus.Registerer) (http.RoundTripper, *Frontend, *frontend2.Frontend2, error) { +// Initializes frontend (either V1 -- without scheduler, or V2 -- with scheduler) or no frontend at +// all if downstream Prometheus URL is used instead. +// +// Returned RoundTripper can be wrapped in more round-tripper middlewares, and then eventually registered +// into HTTP server using the Handler from this package. Returned RoundTripper is always non-nil +// (if there are no errors), and it uses the returned frontend (if any). +func InitFrontend(cfg CombinedFrontendConfig, limits Limits, grpcListenPort int, log log.Logger, reg prometheus.Registerer) (http.RoundTripper, *Frontend, *frontend2.Frontend2, error) { switch { case cfg.DownstreamURL != "": // If the user has specified a downstream Prometheus, then we should use that. rt, err := NewDownstreamRoundTripper(cfg.DownstreamURL) return rt, nil, nil, err - case cfg.Frontend2.SchedulerAddr != "": + case cfg.FrontendV2.SchedulerAddress != "": // If query-scheduler address is configured, use Frontend2. - if cfg.Frontend2.Addr == "" { - addr, err := util.GetFirstAddressOf(cfg.Frontend2.InfNames) + if cfg.FrontendV2.Addr == "" { + addr, err := util.GetFirstAddressOf(cfg.FrontendV2.InfNames) if err != nil { return nil, nil, nil, errors.Wrap(err, "failed to get frontend address") } - cfg.Frontend2.Addr = addr + cfg.FrontendV2.Addr = addr } - if cfg.Frontend2.Port == 0 { - cfg.Frontend2.Port = grpcListenPort + if cfg.FrontendV2.Port == 0 { + cfg.FrontendV2.Port = grpcListenPort } - fr, err := frontend2.NewFrontend2(cfg.Frontend2, log, reg) + fr, err := frontend2.NewFrontend2(cfg.FrontendV2, log, reg) return AdaptGrpcRoundTripperToHTTPRoundTripper(fr), nil, fr, err default: // No scheduler = use original frontend. - fr, err := New(cfg.Frontend, limits, log, reg) + fr, err := New(cfg.FrontendV1, limits, log, reg) if err != nil { return nil, nil, nil, err } @@ -83,9 +89,12 @@ func (cfg *CombinedWorkerConfig) RegisterFlags(f *flag.FlagSet) { cfg.WorkerV2.RegisterFlags(f) } -func (cfg *CombinedWorkerConfig) InitQuerierWorker(querierCfg querier.Config, handler http.Handler, log log.Logger) (services.Service, error) { +// Initializes querier-worker, which uses either configured query-scheduler or query-frontend, +// or if none is specified and no worker is necessary returns nil (in that case queries are +// received directly from HTTP server). 
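+// When the scheduler address is set, the shared settings (gRPC client config, concurrency and querier ID) are
+// copied from the v1 worker config below, so the existing worker configuration keeps applying.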
+func InitQuerierWorker(cfg CombinedWorkerConfig, querierCfg querier.Config, handler http.Handler, log log.Logger) (services.Service, error) { switch { - case cfg.WorkerV2.SchedulerAddr != "": + case cfg.WorkerV2.SchedulerAddress != "": // Copy settings from querier v1 config struct. cfg.WorkerV2.GRPCClientConfig = cfg.WorkerV1.GRPCClientConfig cfg.WorkerV2.MatchMaxConcurrency = cfg.WorkerV1.MatchMaxConcurrency @@ -93,15 +102,14 @@ func (cfg *CombinedWorkerConfig) InitQuerierWorker(querierCfg querier.Config, ha cfg.WorkerV2.Parallelism = cfg.WorkerV1.Parallelism cfg.WorkerV2.QuerierID = cfg.WorkerV1.QuerierID - level.Info(log).Log("msg", "Starting querier worker v2 with scheduler", "scheduler", cfg.WorkerV2.SchedulerAddr) + level.Info(log).Log("msg", "Starting querier worker connected to query-scheduler", "scheduler", cfg.WorkerV2.SchedulerAddress) return frontend2.NewQuerierSchedulerWorkers(cfg.WorkerV2, httpgrpc_server.NewServer(handler), prometheus.DefaultRegisterer, log) case cfg.WorkerV1.FrontendAddress != "": - level.Info(log).Log("msg", "Starting querier worker v1 with frontend", "frontend", cfg.WorkerV1.FrontendAddress) + level.Info(log).Log("msg", "Starting querier worker connected to query-frontend", "frontend", cfg.WorkerV1.FrontendAddress) return NewWorker(cfg.WorkerV1, querierCfg, httpgrpc_server.NewServer(handler), log) default: - // No querier worker is necessary, querier will receive queries directly from HTTP server. return nil, nil } } diff --git a/pkg/querier/frontend/frontend_test.go b/pkg/querier/frontend/frontend_test.go index a9b20e36e36..df2acc49118 100644 --- a/pkg/querier/frontend/frontend_test.go +++ b/pkg/querier/frontend/frontend_test.go @@ -386,7 +386,7 @@ func testFrontend(t *testing.T, config CombinedFrontendConfig, handler http.Hand httpListen, err := net.Listen("tcp", "localhost:0") require.NoError(t, err) - rt, v1, v2, err := config.InitFrontend(limits{}, 0, logger, nil) + rt, v1, v2, err := InitFrontend(config, limits{}, 0, logger, nil) require.NoError(t, err) require.NotNil(t, rt) // v1 will be nil if DownstreamURL is defined. @@ -432,8 +432,8 @@ func defaultFrontendConfig() CombinedFrontendConfig { config := CombinedFrontendConfig{} flagext.DefaultValues(&config) flagext.DefaultValues(&config.Handler) - flagext.DefaultValues(&config.Frontend) - flagext.DefaultValues(&config.Frontend2) + flagext.DefaultValues(&config.FrontendV1) + flagext.DefaultValues(&config.FrontendV2) return config } diff --git a/pkg/querier/frontend2/frontend.proto b/pkg/querier/frontend2/frontend.proto index e420602ba25..4b76e0b3d22 100644 --- a/pkg/querier/frontend2/frontend.proto +++ b/pkg/querier/frontend2/frontend.proto @@ -10,10 +10,10 @@ import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; -// Scheduler interface exposed to Queriers. (Previously called Frontend) +// Scheduler interface exposed to Queriers. service SchedulerForQuerier { - // After calling this method, both Querier and Scheduler enters a loop, in which querier waits for - // a "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, + // After calling this method, both Querier and Scheduler enter a loop, in which querier waits for + // "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. 
// // Long-running loop is used to detect broken connection between scheduler and querier. This is important @@ -28,6 +28,8 @@ message QuerierToScheduler { } message SchedulerToQuerier { + // Query ID as reported by frontend. When querier sends the response back to frontend (using frontendAddress), + // it identifies the query by using this ID. uint64 queryID = 1; httpgrpc.HTTPRequest httpRequest = 2; @@ -46,6 +48,9 @@ service FrontendForQuerier { message QueryResultRequest { uint64 queryID = 1; httpgrpc.HTTPResponse httpResponse = 2; + + // There is no userID field here, because Querier puts userID into the context when + // calling QueryResult, and that is where Frontend expects to find it. } message QueryResultResponse { } @@ -73,7 +78,8 @@ message FrontendToScheduler { // Used by INIT message. Will be put into all requests passed to querier. string frontendAddress = 2; - // Used by ENQUEUE and CANCEL. Each enqueued query must have queryID higher than previous one. + // Used by ENQUEUE and CANCEL. + // Each frontend manages its own queryIDs. Different frontends may use same set of query IDs. uint64 queryID = 3; // Following are used by ENQUEUE only. diff --git a/pkg/querier/frontend2/frontend2.go b/pkg/querier/frontend2/frontend2.go index b46cc50a3ea..f6258200735 100644 --- a/pkg/querier/frontend2/frontend2.go +++ b/pkg/querier/frontend2/frontend2.go @@ -26,13 +26,13 @@ import ( // Config for a Frontend2. type Config struct { - SchedulerAddr string `yaml:"scheduler_address"` + SchedulerAddress string `yaml:"scheduler_address"` DNSLookupPeriod time.Duration `yaml:"scheduler_dns_lookup_period"` WorkerConcurrency int `yaml:"scheduler_worker_concurrency"` GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"grpc_client_config"` // Used to find local IP address, that is sent to scheduler and querier-worker. - InfNames []string `yaml:"interface_names"` + InfNames []string `yaml:"instance_interface_names"` // If set, address is not computed from interfaces. Addr string `yaml:"address" doc:"hidden"` @@ -40,14 +40,14 @@ type Config struct { } func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.SchedulerAddr, "frontend.scheduler-address", "", "DNS hostname used for finding schedulers.") - f.DurationVar(&cfg.DNSLookupPeriod, "frontend.scheduler-dns-lookup-period", 10*time.Second, "How often to query DNS.") - f.IntVar(&cfg.WorkerConcurrency, "frontend.scheduler-worker-concurrency", 5, "Number of goroutines pushing requests to ") + f.StringVar(&cfg.SchedulerAddress, "frontend.scheduler-address", "", "DNS hostname used for finding query-schedulers.") + f.DurationVar(&cfg.DNSLookupPeriod, "frontend.scheduler-dns-lookup-period", 10*time.Second, "How often to resolve the scheduler-address, in order to look for new query-scheduler instances.") + f.IntVar(&cfg.WorkerConcurrency, "frontend.scheduler-worker-concurrency", 5, "Number of concurrent workers forwarding queries to single query-scheduler.") cfg.InfNames = []string{"eth0", "en0"} - f.Var((*flagext.StringSlice)(&cfg.InfNames), "frontend.interface", "Name of network interface to read address from.") - f.StringVar(&cfg.Addr, "frontend.address", "", "IP address to advertise to querier (via scheduler) (resolved via interfaces by default).") - f.IntVar(&cfg.Port, "frontend.port", 0, "Port to advertise to querier (via scheduler) (defaults to server.grpc-listen-port).") + f.Var((*flagext.StringSlice)(&cfg.InfNames), "frontend.instance-interface-names", "Name of network interface to read address from. 
This address is sent to query-scheduler and querier, which uses it to send the query response back to query-frontend.") + f.StringVar(&cfg.Addr, "frontend.instance-address", "", "IP address to advertise to querier (via scheduler) (resolved via interfaces by default).") + f.IntVar(&cfg.Port, "frontend.instance-port", 0, "Port to advertise to querier (via scheduler) (defaults to server.grpc-listen-port).") cfg.GRPCClientConfig.RegisterFlagsWithPrefix("frontend.grpc-client-config", f) } @@ -80,9 +80,18 @@ type frontendRequest struct { response chan *httpgrpc.HTTPResponse } +type enqueueStatus int + +const ( + // Sent to scheduler successfully, and frontend should wait for response now. + wait_for_response enqueueStatus = iota + + // Failed to forward request to scheduler, frontend will try again. + failed +) + type enqueueResult struct { - success bool // True if request was sent to scheduler successfully, and frontend should wait for response. - retry bool // Whether request can be retried. + status enqueueStatus cancelCh chan<- uint64 // Channel that can be used for request cancellation. If nil, cancellation is not possible. } @@ -103,19 +112,20 @@ func NewFrontend2(cfg Config, log log.Logger, reg prometheus.Registerer) (*Front schedulerWorkers: schedulerWorkers, requests: newRequestsInProgress(), } - // Randomize to avoid getting responses from queries sent before restart (which could lead to leak between tenants). + // Randomize to avoid getting responses from queries sent before restart, which could lead to mixing results + // between different queries. Note that frontend verifies the user, so it cannot leak results between tenants. // This isn't perfect, but better than nothing. f.lastQueryID.Store(rand.Uint64()) promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_frontend_queries_in_progress", + Name: "cortex_query_frontend_queries_in_progress", Help: "Number of queries in progress handled by this frontend.", }, func() float64 { return float64(f.requests.count()) }) promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_frontend_connected_schedulers", + Name: "cortex_query_frontend_connected_schedulers", Help: "Number of schedulers this frontend is connected to.", }, func() float64 { return float64(f.schedulerWorkers.getWorkersCount()) @@ -188,13 +198,12 @@ enqueueAgain: select { case <-ctx.Done(): return nil, ctx.Err() + case enqRes := <-freq.enqueue: - if enqRes.success { + if enqRes.status == wait_for_response { cancelCh = enqRes.cancelCh - break - } - - if enqRes.retry { + break // go wait for response. + } else if enqRes.status == failed { retries-- if retries > 0 { goto enqueueAgain @@ -221,14 +230,22 @@ enqueueAgain: } } -func (f *Frontend2) QueryResult(_ context.Context, qrReq *QueryResultRequest) (*QueryResultResponse, error) { +func (f *Frontend2) QueryResult(ctx context.Context, qrReq *QueryResultRequest) (*QueryResultResponse, error) { + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + req := f.requests.get(qrReq.QueryID) - if req != nil { + // It is possible that some old response belonging to different user was received, if frontend has restarted. + // To avoid leaking query results between users, we verify the user here. + // To avoid mixing results from different queries, we randomize queryID counter on start. + if req != nil && req.userID == userID { select { case req.response <- qrReq.HttpResponse: // Should always be possible, unless QueryResult is called multiple times with the same queryID. 
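As a counterpart to the user check above: the proto comment earlier notes that `QueryResultRequest` carries no userID because the querier is expected to put the tenant into the context when calling `QueryResult`. The sketch below shows that querier-side call; `FrontendForQuerierClient` is the assumed name of the generated client, how the connection to `frontendAddress` is obtained is out of scope, and the org-ID propagation assumption is spelled out in the comment.

```go
package frontend2

import (
	"context"

	"github.com/weaveworks/common/httpgrpc"
	"github.com/weaveworks/common/user"
)

// returnResult is a sketch only, not code from this patch.
func returnResult(ctx context.Context, client FrontendForQuerierClient, userID string, queryID uint64, resp *httpgrpc.HTTPResponse) error {
	// The tenant travels in the context, not in the request body. This assumes the client
	// connection is configured to forward the org ID to the server (for example via a
	// user-header client interceptor), which is how the frontend's ExtractOrgID sees it.
	ctx = user.InjectOrgID(ctx, userID)

	// The frontend matches the result to the waiting request by queryID and verifies the
	// tenant before delivering it to the caller of RoundTripGRPC.
	_, err := client.QueryResult(ctx, &QueryResultRequest{
		QueryID:      queryID,
		HttpResponse: resp,
	})
	return err
}
```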
default: - // If we cannot write to the channel, just ignore it. + level.Warn(f.log).Log("msg", "failed to write query result to the response channel", "queryID", qrReq.QueryID, "user", userID) } } diff --git a/pkg/querier/frontend2/frontend2_test.go b/pkg/querier/frontend2/frontend2_test.go index 4c3bef7490a..ad2f8c5a0e7 100644 --- a/pkg/querier/frontend2/frontend2_test.go +++ b/pkg/querier/frontend2/frontend2_test.go @@ -37,7 +37,7 @@ func setupFrontend2(t *testing.T, schedulerReplyFunc func(f *Frontend2, msg *Fro cfg := Config{} flagext.DefaultValues(&cfg) - cfg.SchedulerAddr = l.Addr().String() + cfg.SchedulerAddress = l.Addr().String() cfg.WorkerConcurrency = testFrontendWorkerConcurrency cfg.Addr = h cfg.Port = grpcPort diff --git a/pkg/querier/frontend2/frontend_scheduler_worker.go b/pkg/querier/frontend2/frontend_scheduler_worker.go index bd2837cedb3..78994fb0010 100644 --- a/pkg/querier/frontend2/frontend_scheduler_worker.go +++ b/pkg/querier/frontend2/frontend_scheduler_worker.go @@ -42,7 +42,7 @@ func newFrontendSchedulerWorkers(cfg Config, frontendAddress string, requestsCh workers: map[string]*frontendSchedulerWorker{}, } - w, err := NewDNSWatcher(cfg.SchedulerAddr, cfg.DNSLookupPeriod, f) + w, err := util.NewDNSWatcher(cfg.SchedulerAddress, cfg.DNSLookupPeriod, f) if err != nil { return nil, err } @@ -88,7 +88,7 @@ func (f *frontendSchedulerWorkers) AddressAdded(address string) { return } - // If not, start a new one. + // No worker for this address yet, start a new one. w = newFrontendSchedulerWorker(conn, address, f.frontendAddress, f.requestsCh, f.cfg.WorkerConcurrency, f.log) f.mu.Lock() @@ -97,7 +97,7 @@ func (f *frontendSchedulerWorkers) AddressAdded(address string) { // Can be nil if stopping has been called already. if f.workers != nil { f.workers[address] = w - go w.start() + w.start() } } @@ -248,35 +248,35 @@ func (w *frontendSchedulerWorker) schedulerLoop(ctx context.Context, loop Schedu }) if err != nil { - req.enqueue <- enqueueResult{success: false, retry: true} + req.enqueue <- enqueueResult{status: failed} return err } resp, err := loop.Recv() if err != nil { - req.enqueue <- enqueueResult{success: false, retry: true} + req.enqueue <- enqueueResult{status: failed} return err } switch resp.Status { case OK: - req.enqueue <- enqueueResult{success: true, cancelCh: w.cancelCh} + req.enqueue <- enqueueResult{status: wait_for_response, cancelCh: w.cancelCh} // Response will come from querier. case SHUTTING_DOWN: - // Scheduler is shutting down, report failure to enqueue and stop the loop. - req.enqueue <- enqueueResult{success: false, retry: true} + // Scheduler is shutting down, report failure to enqueue and stop this loop. 
+ req.enqueue <- enqueueResult{status: failed} return errors.New("scheduler is shutting down") case ERROR: - req.enqueue <- enqueueResult{success: true, retry: false} + req.enqueue <- enqueueResult{status: wait_for_response} req.response <- &httpgrpc.HTTPResponse{ Code: http.StatusInternalServerError, Body: []byte(err.Error()), } case TOO_MANY_REQUESTS_PER_TENANT: - req.enqueue <- enqueueResult{success: true, retry: false} + req.enqueue <- enqueueResult{status: wait_for_response} req.response <- &httpgrpc.HTTPResponse{ Code: http.StatusTooManyRequests, Body: []byte("too many outstanding requests"), diff --git a/pkg/querier/frontend2/querier_scheduler_worker.go b/pkg/querier/frontend2/querier_scheduler_worker.go index 6daf84b1d64..a2d265950bc 100644 --- a/pkg/querier/frontend2/querier_scheduler_worker.go +++ b/pkg/querier/frontend2/querier_scheduler_worker.go @@ -35,8 +35,8 @@ type RequestHandler interface { } type QuerierWorkersConfig struct { - SchedulerAddr string `yaml:"scheduler_address"` - DNSLookupPeriod time.Duration `yaml:"scheduler_dns_lookup_period"` + SchedulerAddress string `yaml:"scheduler_address"` + DNSLookupPeriod time.Duration `yaml:"scheduler_dns_lookup_period"` // Following settings are not exposed via YAML or CLI Flags, but instead copied from "v1" worker config. GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"-"` // In v1 this is called "frontend client", here we use it for scheduler. @@ -47,8 +47,8 @@ type QuerierWorkersConfig struct { } func (cfg *QuerierWorkersConfig) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.SchedulerAddr, "querier.scheduler-address", "", "Hostname (and port) of scheduler that querier will periodically resolve, connect to and receive queries from. If set, takes precedence over -querier.frontend-address.") - f.DurationVar(&cfg.DNSLookupPeriod, "querier.scheduler-dns-lookup-period", 10*time.Second, "How often to resolve scheduler hostname.") + f.StringVar(&cfg.SchedulerAddress, "querier.scheduler-address", "", "Hostname (and port) of scheduler that querier will periodically resolve, connect to and receive queries from. 
If set, takes precedence over -querier.frontend-address.") + f.DurationVar(&cfg.DNSLookupPeriod, "querier.scheduler-dns-lookup-period", 10*time.Second, "How often to resolve the scheduler-address, in order to look for new query-scheduler instances.") } type querierSchedulerWorkers struct { @@ -70,7 +70,7 @@ type querierSchedulerWorkers struct { } func NewQuerierSchedulerWorkers(cfg QuerierWorkersConfig, handler RequestHandler, reg prometheus.Registerer, log log.Logger) (services.Service, error) { - if cfg.SchedulerAddr == "" { + if cfg.SchedulerAddress == "" { return nil, errors.New("no scheduler address") } @@ -83,7 +83,7 @@ func NewQuerierSchedulerWorkers(cfg QuerierWorkersConfig, handler RequestHandler } frontendClientsGauge := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_querier_scheduler_worker_clients", + Name: "cortex_querier_scheduler_worker_frontend_clients", Help: "The current number of frontend clients.", }) @@ -109,7 +109,7 @@ func NewQuerierSchedulerWorkers(cfg QuerierWorkersConfig, handler RequestHandler p := client.NewPool("frontend", poolConfig, nil, f.createFrontendClient, frontendClientsGauge, log) f.frontendPool = p - w, err := NewDNSWatcher(cfg.SchedulerAddr, cfg.DNSLookupPeriod, f) + w, err := util.NewDNSWatcher(cfg.SchedulerAddress, cfg.DNSLookupPeriod, f) if err != nil { return nil, err } diff --git a/pkg/querier/frontend2/scheduler.go b/pkg/querier/frontend2/scheduler.go index 024a55ebd37..6350139239c 100644 --- a/pkg/querier/frontend2/scheduler.go +++ b/pkg/querier/frontend2/scheduler.go @@ -62,11 +62,11 @@ type connectedFrontend struct { } type SchedulerConfig struct { - MaxOutstandingPerTenant int + MaxOutstandingPerTenant int `yaml:"max_outstanding_requests_per_tenant"` } func (cfg *SchedulerConfig) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&cfg.MaxOutstandingPerTenant, "query-scheduler.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per query-scheduler; requests beyond this error with HTTP 429.") + f.IntVar(&cfg.MaxOutstandingPerTenant, "query-scheduler.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per query-scheduler. In-flight requests above this limit will fail with HTTP response status code 429.") } // NewScheduler creates a new Scheduler. 
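To make the semantics of `-query-scheduler.max-outstanding-requests-per-tenant` concrete: the scheduler keeps a bounded queue per tenant, and an enqueue over the limit is reported back as TOO_MANY_REQUESTS_PER_TENANT, which the frontend converts into an HTTP 429 ("too many outstanding requests"). The following is a simplified illustration only, not the queue implementation used in this patch; the `request` type, the map handling, and the lack of locking are deliberate simplifications.

```go
package frontend2

import "errors"

// request is a stand-in for the scheduler's internal per-query bookkeeping.
type request struct{}

var errTooManyRequests = errors.New("too many outstanding requests")

// tenantQueues illustrates the per-tenant limit: one bounded channel per tenant.
// The real scheduler also needs locking and querier-aware dequeuing, omitted here.
type tenantQueues struct {
	maxOutstandingPerTenant int
	queues                  map[string]chan *request
}

func (q *tenantQueues) enqueue(userID string, r *request) error {
	queue, ok := q.queues[userID]
	if !ok {
		queue = make(chan *request, q.maxOutstandingPerTenant)
		q.queues[userID] = queue
	}
	select {
	case queue <- r:
		return nil
	default:
		// This tenant already has maxOutstandingPerTenant requests outstanding on this
		// scheduler; the rejection eventually reaches the client as HTTP 429.
		return errTooManyRequests
	}
}
```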
@@ -81,12 +81,12 @@ func NewScheduler(cfg SchedulerConfig, limits Limits, log log.Logger, registerer queueDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ Name: "cortex_query_scheduler_queue_duration_seconds", - Help: "Time spend by requests queued.", + Help: "Time spend by requests in queue before getting picked up by a querier.", Buckets: prometheus.DefBuckets, }), connectedWorkers: promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ Name: "cortex_query_scheduler_connected_workers", - Help: "Number of worker clients currently connected to the frontend.", + Help: "Number of querier worker clients currently connected to the query-scheduler.", }, func() float64 { return float64(connectedQuerierWorkers.Load()) }), queueLength: promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{ Name: "cortex_query_scheduler_queue_length", diff --git a/pkg/querier/frontend2/dns_watcher.go b/pkg/util/dns_watcher.go similarity index 93% rename from pkg/querier/frontend2/dns_watcher.go rename to pkg/util/dns_watcher.go index 5ca7820c248..d4af88f57ba 100644 --- a/pkg/querier/frontend2/dns_watcher.go +++ b/pkg/util/dns_watcher.go @@ -1,4 +1,4 @@ -package frontend2 +package util import ( "context" @@ -13,8 +13,10 @@ import ( // Notifications about address resolution. All notifications are sent on the same goroutine. type DNSNotifications interface { + // New address has been discovered by DNS watcher for supplied hostname. AddressAdded(address string) + // Previously-discovered address is no longer resolved for the hostname. AddressRemoved(address string) } From 4c672186ea6a7ddcaec7ed51490574672d56968c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Thu, 29 Oct 2020 23:27:42 +0100 Subject: [PATCH 5/8] Fixed docs. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- docs/configuration/config-file-reference.md | 25 ++++++++++++--------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 403217b19aa..8e7ea1ec1a8 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -159,10 +159,11 @@ runtime_config: [memberlist: ] query_scheduler: - # Maximum number of outstanding requests per tenant per query-scheduler; - # requests beyond this error with HTTP 429. + # Maximum number of outstanding requests per tenant per query-scheduler. + # In-flight requests above this limit will fail with HTTP response status code + # 429. # CLI flag: -query-scheduler.max-outstanding-requests-per-tenant - [maxoutstandingpertenant: | default = 100] + [max_outstanding_requests_per_tenant: | default = 100] ``` ### `server_config` @@ -777,15 +778,16 @@ The `query_frontend_config` configures the Cortex query-frontend. # CLI flag: -querier.max-outstanding-requests-per-tenant [max_outstanding_per_tenant: | default = 100] -# DNS hostname used for finding schedulers. +# DNS hostname used for finding query-schedulers. # CLI flag: -frontend.scheduler-address [scheduler_address: | default = ""] -# How often to query DNS. +# How often to resolve the scheduler-address, in order to look for new +# query-scheduler instances. # CLI flag: -frontend.scheduler-dns-lookup-period [scheduler_dns_lookup_period: | default = 10s] -# Number of goroutines pushing requests to +# Number of concurrent workers forwarding queries to single query-scheduler. 
# CLI flag: -frontend.scheduler-worker-concurrency [scheduler_worker_concurrency: | default = 5] @@ -852,9 +854,11 @@ grpc_client_config: # CLI flag: -frontend.grpc-client-config.tls-insecure-skip-verify [tls_insecure_skip_verify: | default = false] -# Name of network interface to read address from. -# CLI flag: -frontend.interface -[interface_names: | default = [eth0 en0]] +# Name of network interface to read address from. This address is sent to +# query-scheduler and querier, which uses it to send the query response back to +# query-frontend. +# CLI flag: -frontend.instance-interface-names +[instance_interface_names: | default = [eth0 en0]] # Compress HTTP responses. # CLI flag: -querier.compress-http-responses @@ -2633,7 +2637,8 @@ grpc_client_config: # CLI flag: -querier.scheduler-address [scheduler_address: | default = ""] -# How often to resolve scheduler hostname. +# How often to resolve the scheduler-address, in order to look for new +# query-scheduler instances. # CLI flag: -querier.scheduler-dns-lookup-period [scheduler_dns_lookup_period: | default = 10s] ``` From d2f44585b2b068ece66a02a69d64940074ac4768 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Thu, 29 Oct 2020 23:41:21 +0100 Subject: [PATCH 6/8] Scheduler now sends OK after frontend connects. This allows scheduler to also send shutting down error to frontend immediately. Frontend worker expects OK, and exits FrontendLoop otherwise. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/querier/frontend2/frontend.pb.go | 13 +++++---- pkg/querier/frontend2/frontend2_test.go | 28 +++++++++++++------ .../frontend2/frontend_scheduler_worker.go | 9 +++++- pkg/querier/frontend2/scheduler.go | 7 +++++ pkg/querier/frontend2/scheduler_test.go | 5 ++++ 5 files changed, 48 insertions(+), 14 deletions(-) diff --git a/pkg/querier/frontend2/frontend.pb.go b/pkg/querier/frontend2/frontend.pb.go index 038207c6430..8a7c033e15e 100644 --- a/pkg/querier/frontend2/frontend.pb.go +++ b/pkg/querier/frontend2/frontend.pb.go @@ -128,6 +128,8 @@ func (m *QuerierToScheduler) GetQuerierID() string { } type SchedulerToQuerier struct { + // Query ID as reported by frontend. When querier sends the response back to frontend (using frontendAddress), + // it identifies the query by using this ID. QueryID uint64 `protobuf:"varint,1,opt,name=queryID,proto3" json:"queryID,omitempty"` HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,2,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` // Where should querier send HTTP Response to (using FrontendForQuerier interface). @@ -286,7 +288,8 @@ type FrontendToScheduler struct { Type FrontendToSchedulerType `protobuf:"varint,1,opt,name=type,proto3,enum=frontend2.FrontendToSchedulerType" json:"type,omitempty"` // Used by INIT message. Will be put into all requests passed to querier. FrontendAddress string `protobuf:"bytes,2,opt,name=frontendAddress,proto3" json:"frontendAddress,omitempty"` - // Used by ENQUEUE and CANCEL. Each enqueued query must have queryID higher than previous one. + // Used by ENQUEUE and CANCEL. + // Each frontend manages its own queryIDs. Different frontends may use same set of query IDs. QueryID uint64 `protobuf:"varint,3,opt,name=queryID,proto3" json:"queryID,omitempty"` // Following are used by ENQUEUE only. 
UserID string `protobuf:"bytes,4,opt,name=userID,proto3" json:"userID,omitempty"` @@ -745,8 +748,8 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type SchedulerForQuerierClient interface { - // After calling this method, both Querier and Scheduler enters a loop, in which querier waits for - // a "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, + // After calling this method, both Querier and Scheduler enter a loop, in which querier waits for + // "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. // // Long-running loop is used to detect broken connection between scheduler and querier. This is important @@ -795,8 +798,8 @@ func (x *schedulerForQuerierQuerierLoopClient) Recv() (*SchedulerToQuerier, erro // SchedulerForQuerierServer is the server API for SchedulerForQuerier service. type SchedulerForQuerierServer interface { - // After calling this method, both Querier and Scheduler enters a loop, in which querier waits for - // a "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, + // After calling this method, both Querier and Scheduler enter a loop, in which querier waits for + // "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. // // Long-running loop is used to detect broken connection between scheduler and querier. This is important diff --git a/pkg/querier/frontend2/frontend2_test.go b/pkg/querier/frontend2/frontend2_test.go index ad2f8c5a0e7..c19755bb522 100644 --- a/pkg/querier/frontend2/frontend2_test.go +++ b/pkg/querier/frontend2/frontend2_test.go @@ -76,24 +76,28 @@ func setupFrontend2(t *testing.T, schedulerReplyFunc func(f *Frontend2, msg *Fro return f, ms } -func sendResponseWithDelay(f *Frontend2, delay time.Duration, queryID uint64, resp *httpgrpc.HTTPResponse) { +func sendResponseWithDelay(f *Frontend2, delay time.Duration, userID string, queryID uint64, resp *httpgrpc.HTTPResponse) { if delay > 0 { time.Sleep(delay) } - _, _ = f.QueryResult(context.Background(), &QueryResultRequest{ + ctx := user.InjectOrgID(context.Background(), userID) + _, _ = f.QueryResult(ctx, &QueryResultRequest{ QueryID: queryID, HttpResponse: resp, }) } func TestFrontendBasicWorkflow(t *testing.T) { - body := "all fine here" + const ( + body = "all fine here" + userID = "test" + ) f, _ := setupFrontend2(t, func(f *Frontend2, msg *FrontendToScheduler) *SchedulerToFrontend { // We cannot call QueryResult directly, as Frontend is not yet waiting for the response. // It first needs to be told that enqueuing has succeeded. 
- go sendResponseWithDelay(f, 100*time.Millisecond, msg.QueryID, &httpgrpc.HTTPResponse{ + go sendResponseWithDelay(f, 100*time.Millisecond, userID, msg.QueryID, &httpgrpc.HTTPResponse{ Code: 200, Body: []byte(body), }) @@ -101,7 +105,7 @@ func TestFrontendBasicWorkflow(t *testing.T) { return &SchedulerToFrontend{Status: OK} }) - resp, err := f.RoundTripGRPC(user.InjectOrgID(context.Background(), "test"), &httpgrpc.HTTPRequest{}) + resp, err := f.RoundTripGRPC(user.InjectOrgID(context.Background(), userID), &httpgrpc.HTTPRequest{}) require.NoError(t, err) require.Equal(t, int32(200), resp.Code) require.Equal(t, []byte(body), resp.Body) @@ -110,7 +114,10 @@ func TestFrontendBasicWorkflow(t *testing.T) { func TestFrontendRetryEnqueue(t *testing.T) { // Frontend uses worker concurrency to compute number of retries. We use one less failure. failures := atomic.NewInt64(testFrontendWorkerConcurrency - 1) - body := "hello world" + const ( + body = "hello world" + userID = "test" + ) f, _ := setupFrontend2(t, func(f *Frontend2, msg *FrontendToScheduler) *SchedulerToFrontend { fail := failures.Dec() @@ -118,7 +125,7 @@ func TestFrontendRetryEnqueue(t *testing.T) { return &SchedulerToFrontend{Status: SHUTTING_DOWN} } - go sendResponseWithDelay(f, 100*time.Millisecond, msg.QueryID, &httpgrpc.HTTPResponse{ + go sendResponseWithDelay(f, 100*time.Millisecond, userID, msg.QueryID, &httpgrpc.HTTPResponse{ Code: 200, Body: []byte(body), }) @@ -126,7 +133,7 @@ func TestFrontendRetryEnqueue(t *testing.T) { return &SchedulerToFrontend{Status: OK} }) - _, err := f.RoundTripGRPC(user.InjectOrgID(context.Background(), "test"), &httpgrpc.HTTPRequest{}) + _, err := f.RoundTripGRPC(user.InjectOrgID(context.Background(), userID), &httpgrpc.HTTPRequest{}) require.NoError(t, err) } @@ -236,6 +243,11 @@ func (m *mockScheduler) FrontendLoop(frontend SchedulerForFrontend_FrontendLoopS m.frontendAddr[init.FrontendAddress]++ m.mu.Unlock() + // Ack INIT from frontend. + if err := frontend.Send(&SchedulerToFrontend{Status: OK}); err != nil { + return err + } + for { msg, err := frontend.Recv() if err != nil { diff --git a/pkg/querier/frontend2/frontend_scheduler_worker.go b/pkg/querier/frontend2/frontend_scheduler_worker.go index 78994fb0010..815cef0f53a 100644 --- a/pkg/querier/frontend2/frontend_scheduler_worker.go +++ b/pkg/querier/frontend2/frontend_scheduler_worker.go @@ -2,13 +2,13 @@ package frontend2 import ( "context" - "errors" "net/http" "sync" "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" "github.com/weaveworks/common/httpgrpc" "google.golang.org/grpc" @@ -233,6 +233,13 @@ func (w *frontendSchedulerWorker) schedulerLoop(ctx context.Context, loop Schedu return err } + if resp, err := loop.Recv(); err != nil || resp.Status != OK { + if err != nil { + return err + } + return errors.Errorf("unexpected status received: %v", resp.Status) + } + for { select { case <-ctx.Done(): diff --git a/pkg/querier/frontend2/scheduler.go b/pkg/querier/frontend2/scheduler.go index 6350139239c..1d3070a7c16 100644 --- a/pkg/querier/frontend2/scheduler.go +++ b/pkg/querier/frontend2/scheduler.go @@ -166,6 +166,13 @@ func (s *Scheduler) FrontendLoop(frontend SchedulerForFrontend_FrontendLoopServe } defer s.frontendDisconnected(frontendAddress) + // Response to INIT. If scheduler is not running, we skip for-loop, send SHUTTING_DOWN and exit this method. 
+ if s.State() == services.Running { + if err := frontend.Send(&SchedulerToFrontend{Status: OK}); err != nil { + return err + } + } + // We stop accepting new queries in Stopping state. By returning quickly, we disconnect frontends, which in turns // cancels all their queries. for s.State() == services.Running { diff --git a/pkg/querier/frontend2/scheduler_test.go b/pkg/querier/frontend2/scheduler_test.go index f590f3bc3f7..ae399503bbe 100644 --- a/pkg/querier/frontend2/scheduler_test.go +++ b/pkg/querier/frontend2/scheduler_test.go @@ -331,6 +331,11 @@ func initFrontendLoop(t *testing.T, client SchedulerForFrontendClient, frontendA FrontendAddress: frontendAddr, })) + // Scheduler acks INIT by sending OK back. + resp, err := loop.Recv() + require.NoError(t, err) + require.True(t, resp.Status == OK) + return loop } From 203e78883a4265de03098249e0051a68509a04e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Thu, 29 Oct 2020 23:54:54 +0100 Subject: [PATCH 7/8] Fixed naming. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/querier/frontend2/frontend2.go | 4 ++-- pkg/querier/frontend2/frontend_scheduler_worker.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/querier/frontend2/frontend2.go b/pkg/querier/frontend2/frontend2.go index f6258200735..9e1c8919949 100644 --- a/pkg/querier/frontend2/frontend2.go +++ b/pkg/querier/frontend2/frontend2.go @@ -84,7 +84,7 @@ type enqueueStatus int const ( // Sent to scheduler successfully, and frontend should wait for response now. - wait_for_response enqueueStatus = iota + waitForResponse enqueueStatus = iota // Failed to forward request to scheduler, frontend will try again. failed @@ -200,7 +200,7 @@ enqueueAgain: return nil, ctx.Err() case enqRes := <-freq.enqueue: - if enqRes.status == wait_for_response { + if enqRes.status == waitForResponse { cancelCh = enqRes.cancelCh break // go wait for response. } else if enqRes.status == failed { diff --git a/pkg/querier/frontend2/frontend_scheduler_worker.go b/pkg/querier/frontend2/frontend_scheduler_worker.go index 815cef0f53a..9d50b7b5088 100644 --- a/pkg/querier/frontend2/frontend_scheduler_worker.go +++ b/pkg/querier/frontend2/frontend_scheduler_worker.go @@ -267,7 +267,7 @@ func (w *frontendSchedulerWorker) schedulerLoop(ctx context.Context, loop Schedu switch resp.Status { case OK: - req.enqueue <- enqueueResult{status: wait_for_response, cancelCh: w.cancelCh} + req.enqueue <- enqueueResult{status: waitForResponse, cancelCh: w.cancelCh} // Response will come from querier. 
case SHUTTING_DOWN: @@ -276,14 +276,14 @@ func (w *frontendSchedulerWorker) schedulerLoop(ctx context.Context, loop Schedu return errors.New("scheduler is shutting down") case ERROR: - req.enqueue <- enqueueResult{status: wait_for_response} + req.enqueue <- enqueueResult{status: waitForResponse} req.response <- &httpgrpc.HTTPResponse{ Code: http.StatusInternalServerError, Body: []byte(err.Error()), } case TOO_MANY_REQUESTS_PER_TENANT: - req.enqueue <- enqueueResult{status: wait_for_response} + req.enqueue <- enqueueResult{status: waitForResponse} req.response <- &httpgrpc.HTTPResponse{ Code: http.StatusTooManyRequests, Body: []byte("too many outstanding requests"), From 8310eed19081bc6c68ec6a139d28996f5f045930 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Fri, 30 Oct 2020 12:08:34 +0100 Subject: [PATCH 8/8] Minor tweaks Signed-off-by: Marco Pracucci --- pkg/querier/frontend/config.go | 32 +++++++++---------- pkg/querier/frontend2/frontend2.go | 2 +- .../frontend2/querier_scheduler_worker.go | 4 +-- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/pkg/querier/frontend/config.go b/pkg/querier/frontend/config.go index d615d1a82cd..18c11a9299b 100644 --- a/pkg/querier/frontend/config.go +++ b/pkg/querier/frontend/config.go @@ -35,6 +35,22 @@ func (cfg *CombinedFrontendConfig) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Prometheus.") } +// Configuration for both querier workers, V1 (using frontend) and V2 (using scheduler). Since many flags are reused +// between the two, they are exposed to YAML/CLI in V1 version (WorkerConfig), and copied to V2 in the init method. +type CombinedWorkerConfig struct { + WorkerV1 WorkerConfig `yaml:",inline"` + WorkerV2 frontend2.QuerierWorkersConfig `yaml:",inline"` +} + +func (cfg *CombinedWorkerConfig) RegisterFlags(f *flag.FlagSet) { + cfg.WorkerV1.RegisterFlags(f) + cfg.WorkerV2.RegisterFlags(f) +} + +func (cfg *CombinedWorkerConfig) Validate(logger log.Logger) error { + return cfg.WorkerV1.Validate(logger) +} + // Initializes frontend (either V1 -- without scheduler, or V2 -- with scheduler) or no frontend at // all if downstream Prometheus URL is used instead. // @@ -77,18 +93,6 @@ func InitFrontend(cfg CombinedFrontendConfig, limits Limits, grpcListenPort int, } } -// Configuration for both querier workers, V1 (using frontend) and V2 (using scheduler). Since many flags are reused -// between the two, they are exposed to YAML/CLI in V1 version (WorkerConfig), and copied to V2 in the init method. -type CombinedWorkerConfig struct { - WorkerV1 WorkerConfig `yaml:",inline"` - WorkerV2 frontend2.QuerierWorkersConfig `yaml:",inline"` -} - -func (cfg *CombinedWorkerConfig) RegisterFlags(f *flag.FlagSet) { - cfg.WorkerV1.RegisterFlags(f) - cfg.WorkerV2.RegisterFlags(f) -} - // Initializes querier-worker, which uses either configured query-scheduler or query-frontend, // or if none is specified and no worker is necessary returns nil (in that case queries are // received directly from HTTP server). 
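Since `InitQuerierWorker` intentionally returns a nil service when neither `-querier.scheduler-address` nor `-querier.frontend-address` is set, callers have to treat nil as "no worker needed". A sketch of such a caller follows; the function name and wiring are illustrative and not taken from this patch, only the `InitQuerierWorker` signature is.

```go
package cortexsketch

import (
	"net/http"

	"github.com/go-kit/kit/log"

	"github.com/cortexproject/cortex/pkg/querier"
	"github.com/cortexproject/cortex/pkg/querier/frontend"
	"github.com/cortexproject/cortex/pkg/util/services"
)

// initQuerierWorkerService is an illustrative caller of InitQuerierWorker.
func initQuerierWorkerService(cfg frontend.CombinedWorkerConfig, querierCfg querier.Config, handler http.Handler, logger log.Logger) (services.Service, error) {
	worker, err := frontend.InitQuerierWorker(cfg, querierCfg, handler, logger)
	if err != nil {
		return nil, err
	}
	if worker == nil {
		// Neither scheduler nor frontend address is configured: the querier serves queries
		// directly from its own HTTP server, so there is no background service to run.
		return nil, nil
	}
	// Otherwise the returned service must be started and stopped by the caller,
	// for example as part of the module's services.Manager.
	return worker, nil
}
```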
@@ -113,7 +117,3 @@ func InitQuerierWorker(cfg CombinedWorkerConfig, querierCfg querier.Config, hand return nil, nil } } - -func (cfg *CombinedWorkerConfig) Validate(logger log.Logger) error { - return cfg.WorkerV1.Validate(logger) -} diff --git a/pkg/querier/frontend2/frontend2.go b/pkg/querier/frontend2/frontend2.go index 9e1c8919949..23b78e2678e 100644 --- a/pkg/querier/frontend2/frontend2.go +++ b/pkg/querier/frontend2/frontend2.go @@ -46,7 +46,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.InfNames = []string{"eth0", "en0"} f.Var((*flagext.StringSlice)(&cfg.InfNames), "frontend.instance-interface-names", "Name of network interface to read address from. This address is sent to query-scheduler and querier, which uses it to send the query response back to query-frontend.") - f.StringVar(&cfg.Addr, "frontend.instance-address", "", "IP address to advertise to querier (via scheduler) (resolved via interfaces by default).") + f.StringVar(&cfg.Addr, "frontend.instance-addr", "", "IP address to advertise to querier (via scheduler) (resolved via interfaces by default).") f.IntVar(&cfg.Port, "frontend.instance-port", 0, "Port to advertise to querier (via scheduler) (defaults to server.grpc-listen-port).") cfg.GRPCClientConfig.RegisterFlagsWithPrefix("frontend.grpc-client-config", f) diff --git a/pkg/querier/frontend2/querier_scheduler_worker.go b/pkg/querier/frontend2/querier_scheduler_worker.go index a2d265950bc..525a46e0a7f 100644 --- a/pkg/querier/frontend2/querier_scheduler_worker.go +++ b/pkg/querier/frontend2/querier_scheduler_worker.go @@ -83,7 +83,7 @@ func NewQuerierSchedulerWorkers(cfg QuerierWorkersConfig, handler RequestHandler } frontendClientsGauge := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_querier_scheduler_worker_frontend_clients", + Name: "cortex_query_scheduler_worker_frontend_clients", Help: "The current number of frontend clients.", }) @@ -94,7 +94,7 @@ func NewQuerierSchedulerWorkers(cfg QuerierWorkersConfig, handler RequestHandler workers: map[string]*querierSchedulerWorker{}, frontendClientRequestDuration: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ - Name: "cortex_querier_scheduler_worker_frontend_request_duration_seconds", + Name: "cortex_query_scheduler_worker_frontend_request_duration_seconds", Help: "Time spend doing requests to frontend.", Buckets: prometheus.ExponentialBuckets(0.001, 4, 6), }, []string{"operation", "status_code"}),
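Both new worker pools in this series (frontend to scheduler, and querier to scheduler) use the relocated `util.DNSWatcher` the same way: implement `DNSNotifications` and keep one worker per resolved scheduler address. The sketch below shows only that pattern; `connectTo`/`disconnectFrom` are hypothetical stand-ins for the real per-address workers, and `NewDNSWatcher` is assumed to return a `services.Service` that the caller runs, as it is used in this patch.

```go
package cortexsketch

import (
	"sync"
	"time"

	"github.com/cortexproject/cortex/pkg/util"
	"github.com/cortexproject/cortex/pkg/util/services"
)

// addressSet implements util.DNSNotifications by tracking one worker per resolved address.
type addressSet struct {
	mu    sync.Mutex
	addrs map[string]struct{}
}

var _ util.DNSNotifications = (*addressSet)(nil)

func (a *addressSet) AddressAdded(address string) {
	a.mu.Lock()
	defer a.mu.Unlock()
	if _, ok := a.addrs[address]; ok {
		return // already watching this scheduler instance
	}
	a.addrs[address] = struct{}{}
	connectTo(address)
}

func (a *addressSet) AddressRemoved(address string) {
	a.mu.Lock()
	defer a.mu.Unlock()
	if _, ok := a.addrs[address]; ok {
		delete(a.addrs, address)
		disconnectFrom(address)
	}
}

// newWatchedAddressSet wires the notifications to a DNS watcher. The returned service
// must be started and stopped by the caller.
func newWatchedAddressSet(schedulerAddress string, lookupPeriod time.Duration) (services.Service, error) {
	a := &addressSet{addrs: map[string]struct{}{}}
	return util.NewDNSWatcher(schedulerAddress, lookupPeriod, a)
}

// connectTo and disconnectFrom are hypothetical; in this patch they correspond to starting
// and stopping the per-address frontend/querier scheduler workers.
func connectTo(address string)      {}
func disconnectFrom(address string) {}
```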