diff --git a/go.mod b/go.mod index 5af2fcd6628..24962c178c0 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/Azure/go-autorest v11.5.1+incompatible // indirect github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 github.com/NYTimes/gziphandler v1.1.1 - github.com/aws/aws-sdk-go v1.22.4 + github.com/aws/aws-sdk-go v1.23.12 github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect github.com/blang/semver v3.5.0+incompatible github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect @@ -54,13 +54,12 @@ require ( github.com/pkg/errors v0.8.1 github.com/prometheus/alertmanager v0.19.0 github.com/prometheus/client_golang v1.1.0 - github.com/prometheus/common v0.6.0 - github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f + github.com/prometheus/common v0.7.0 + github.com/prometheus/prometheus v1.8.2-0.20190918104050-8744afdd1ea0 github.com/satori/go.uuid v1.2.0 // indirect github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e github.com/sercand/kuberesolver v2.1.0+incompatible // indirect - github.com/sirupsen/logrus v1.4.2 // indirect - github.com/stretchr/testify v1.3.0 + github.com/stretchr/testify v1.4.0 github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect github.com/uber-go/atomic v1.3.2 // indirect diff --git a/go.sum b/go.sum index b7bf4760d9a..b7caf0a226b 100644 --- a/go.sum +++ b/go.sum @@ -48,6 +48,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.22.4 h1:Mcq67g9mZEBvBuj/x7mF9KCyw5M8/4I/cjQPkdCsq0I= github.com/aws/aws-sdk-go v1.22.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.12 h1:2UnxgNO6Y5J1OrkXS8XNp0UatDxD1bWHiDT62RDPggI= +github.com/aws/aws-sdk-go v1.23.12/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -478,6 +480,8 @@ github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7q github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180920065004-418d78d0b9a7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -489,6 +493,8 @@ github.com/prometheus/procfs v0.0.3/go.mod 
h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f h1:7C9G4yUogM8QP85pmf11vlBPuV6u2mPbqvbjPVKcNis= github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0= +github.com/prometheus/prometheus v1.8.2-0.20190918104050-8744afdd1ea0 h1:W4dTblzSVIBNfDimJhh70OpZQQMwLVpwK50scXdH94w= +github.com/prometheus/prometheus v1.8.2-0.20190918104050-8744afdd1ea0/go.mod h1:elNqjVbwD3sCZJqKzyN7uEuwGcCpeJvv67D6BrHsDbw= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= @@ -533,6 +539,8 @@ github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRci github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d h1:Ninez2SUm08xpmnw7kVxCeOc3DahF6IuMuRMCdM4wTQ= @@ -727,6 +735,8 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index 6c76d4ce7a8..08f47d86dea 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -206,7 +206,7 @@ func (t *Cortex) initQuerier(cfg *Config) (err error) { false, // Disable admin APIs. util.Logger, querier.DummyRulesRetriever{}, - 0, 0, // Remote read samples and concurrency limit. + 0, 0, 0, // Remote read samples and concurrency limit. 
regexp.MustCompile(".*"), ) promRouter := route.New().WithPrefix("/api/prom/api/v1") diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index a397b0d044c..2f20d31028d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -14,12 +14,12 @@ import ( // struct and override the specific methods. For example, to override only // the MaxRetries method: // -// type retryer struct { -// client.DefaultRetryer -// } +// type retryer struct { +// client.DefaultRetryer +// } // -// // This implementation always has 100 max retries -// func (d retryer) MaxRetries() int { return 100 } +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { NumMaxRetries int } @@ -33,9 +33,9 @@ func (d DefaultRetryer) MaxRetries() int { // RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { // Set the upper limit of delay in retrying at ~five minutes - minTime := 30 - throttle := d.shouldThrottle(r) - if throttle { + var minTime int64 = 30 + isThrottle := r.IsErrorThrottle() + if isThrottle { if delay, ok := getRetryDelay(r); ok { return delay } @@ -44,13 +44,13 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { } retryCount := r.RetryCount - if throttle && retryCount > 8 { + if isThrottle && retryCount > 8 { retryCount = 8 } else if retryCount > 13 { retryCount = 13 } - delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) + delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Int63n(minTime) + minTime) return time.Duration(delay) * time.Millisecond } @@ -65,21 +65,8 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { return true } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. -func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 502: - case 503: - case 504: - default: - return r.IsErrorThrottle() - } - return true + return r.IsErrorRetryable() || r.IsErrorThrottle() } // This will look in the Retry-After header, RFC 7231, for how long diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 10634d173d3..fd1e240f6eb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -20,7 +20,7 @@ type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, // all clients will use the defaults.DefaultConfig structure. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), @@ -251,7 +251,7 @@ type Config struct { // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. 
// -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(aws.NewConfig(). // WithMaxRetries(3), diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index f8853d78af2..0c60e612ea5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) { Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } } - // Catch all other request errors. + // Catch all request errors, and let the default retrier determine + // if the error is retryable. r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable // Override the error with a context canceled error, if that was canceled. ctx := r.Context() @@ -184,37 +184,39 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } - r.RetryCount++ - r.Error = nil - } -}} + r.RetryCount++ + r.Error = nil + } + }} // ValidateEndpointHandler is a request handler to validate a request had the // appropriate Region and Endpoint set. 
Will set r.Error if the endpoint or diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index c2b2c5d65c3..1a7af53a4da 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -98,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin return p } -// NewCredentialsClient returns a Credentials wrapper for retrieving credentials -// from an arbitrary endpoint concurrently. The client will request the +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go index 20510d9aec8..b20b6339484 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -76,12 +76,15 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { // uses unix time in nanoseconds to uniquely identify sessions. sessionName = strconv.FormatInt(now().UnixNano(), 10) } - resp, err := p.client.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{ + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ RoleArn: &p.roleARN, RoleSessionName: &sessionName, WebIdentityToken: aws.String(string(b)), }) - if err != nil { + // InvalidIdentityToken error is a temporary error that can occur + // when assuming a Role with a JWT web identity token.
+ req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) + if err := req.Send(); err != nil { return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index 2c8d5f56d0e..d126764ce4e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -152,18 +152,19 @@ type EC2IAMInfo struct { // An EC2InstanceIdentityDocument provides the shape for unmarshaling // an instance identity document type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index e5031aadc6f..f227210cd91 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -320,6 +320,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, @@ -339,6 +340,7 @@ var awsPartition = partition{ "api.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -346,8 +348,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", @@ -581,6 +586,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1588,6 +1594,7 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": 
endpoint{}, @@ -1631,6 +1638,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -1751,9 +1759,33 @@ var awsPartition = partition{ "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "health": service{ @@ -1962,11 +1994,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -2054,6 +2089,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2124,6 +2160,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2652,6 +2689,7 @@ var awsPartition = partition{ "runtime.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2659,8 +2697,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", @@ -3134,6 +3175,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3287,6 +3329,7 @@ var awsPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3507,9 +3550,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3932,7 +3977,8 @@ var awscnPartition = partition{ }, }, Endpoints: 
endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "kinesis": service{ @@ -4515,6 +4561,7 @@ var awsusgovPartition = partition{ "health": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4613,6 +4660,23 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index e7c9b2b61af..8e332cce6a6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "net" "net/http" "net/url" "reflect" @@ -65,6 +64,15 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + // A value greater than 0 instructs the request to be signed as Presigned URL // You should not set this field directly. Instead use Request's // Presign or PresignRequest methods. @@ -498,21 +506,17 @@ func (r *Request) Send() error { if err := r.sendRequest(); err == nil { return nil - } else if !shouldRetryError(r.Error) { + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + if err := r.prepareRetry(); err != nil { + r.Error = err return err - } else { - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - - if r.Error != nil || !aws.BoolValue(r.Retryable) { - return r.Error - } - - if err := r.prepareRetry(); err != nil { - r.Error = err - return err - } - continue } } } @@ -596,51 +600,6 @@ func AddToUserAgent(r *Request, s string) { r.HTTPRequest.Header.Set("User-Agent", s) } -type temporary interface { - Temporary() bool -} - -func shouldRetryError(origErr error) bool { - switch err := origErr.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - return shouldRetryError(err.OrigErr()) - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. 
- return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryError(err.Err) - case temporary: - if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { - return true - } - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() || isErrConnectionReset(origErr) - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retryable. See - // TestRequest4xxUnretryable for an example. - return true - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true - } -} - // SanitizeHostForHeader removes default port from host and updates request.Host func SanitizeHostForHeader(r *http.Request) { host := getHost(r) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index d0aa54c6d10..c4e659b01d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -1,23 +1,41 @@ package request import ( + "net" + "net/url" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" ) -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the client.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. +// Retryer provides the interface to drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determining if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. type Retryer interface { + // RetryRules returns the retry delay that should be used by the SDK before + // making another request attempt for the failed request. RetryRules(*Request) time.Duration + + // ShouldRetry returns whether the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts made for a request. ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. MaxRetries() int } -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { cfg.Retryer = retryer return cfg @@ -108,32 +126,90 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if error is nil.
func IsErrorRetryable(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == "RequestError" && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false } + // here we don't know the error; so we allow a retry. + return true } - return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. // Returns false if error is nil. func IsErrorThrottle(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeThrottle(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) } return false } -// IsErrorExpiredCreds returns whether the error code is a credential expiry error. -// Returns false if error is nil. +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. func IsErrorExpiredCreds(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeExpiredCreds(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) } return false } @@ -143,17 +219,44 @@ func IsErrorExpiredCreds(err error) bool { // // Alias for the utility function IsErrorRetryable func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + return IsErrorRetryable(r.Error) } -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set.
// // Alias for the utility function IsErrorThrottle func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case 429, 502, 503, 504: + return true + } + } + return IsErrorThrottle(r.Error) } +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + // IsErrorExpired returns whether the error code is a credential expiry error. // Returns false if the request has no Error set. // diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 3a998d5bd62..60a6f9ce2a4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -99,10 +99,10 @@ type envConfig struct { CustomCABundle string csmEnabled string - CSMEnabled bool + CSMEnabled *bool CSMPort string - CSMClientID string CSMHost string + CSMClientID string // Enables endpoint discovery via environment variables. // @@ -230,7 +230,11 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v + } regionKeys := regionEnvKeys profileKeys := profileEnvKeys diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 1b4fcdb10e1..fa1362ff740 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -104,9 +104,13 @@ func New(cfgs ...*aws.Config) *Session { } s := deprecatedNewSession(cfgs...) 
- if envCfg.CSMEnabled { - err := enableCSM(&s.Handlers, envCfg.CSMClientID, - envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) if err != nil { err = fmt.Errorf("failed to enable CSM, %v", err) s.Config.Logger.Log("ERROR:", err.Error()) @@ -347,15 +351,12 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { return s } -func enableCSM(handlers *request.Handlers, - clientID, host, port string, - logger aws.Logger, -) error { +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { if logger != nil { logger.Log("Enabling CSM") } - r, err := csm.Start(clientID, csm.AddressWithDefaults(host, port)) + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) if err != nil { return err } @@ -395,7 +396,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, // Load additional config from file(s) sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) if err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read from file. Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { return nil, err } } @@ -410,9 +417,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) - if envCfg.CSMEnabled { - err := enableCSM(&s.Handlers, envCfg.CSMClientID, - envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) if err != nil { return nil, err } @@ -428,6 +439,46 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, return s, nil } +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + func loadCustomCABundle(s *Session, bundle io.Reader) error { var t *http.Transport switch v := s.Config.HTTPClient.Transport.(type) { diff --git
a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 5170b4982e0..d91ac93a544 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -22,6 +22,12 @@ const ( mfaSerialKey = `mfa_serial` // optional roleSessionNameKey = `role_session_name` // optional + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + // Additional Config fields regionKey = `region` @@ -76,6 +82,12 @@ type sharedConfig struct { // // endpoint_discovery_enabled = true EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string } type sharedConfigFile struct { @@ -251,10 +263,13 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e } // Endpoint discovery - if section.Has(enableEndpointDiscoveryKey) { - v := section.Bool(enableEndpointDiscoveryKey) - cfg.EnableEndpointDiscovery = &v - } + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) return nil } @@ -348,6 +363,16 @@ func updateString(dst *string, section ini.Section, key string) { *dst = section.String(key) } +// updateBoolPtr will only update the dst with the value in the section key, if +// the key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + // SharedConfigLoadError is an error for the shared config file failed to load. type SharedConfigLoadError struct { Filename string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index b0009b3bf88..742d63a3232 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.22.4" +const SDKVersion = "1.23.12" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go new file mode 100644 index 00000000000..44898eed0fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package sdkmath + +import "math" + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + return math.Round(x) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go new file mode 100644 index 00000000000..810ec7f08b0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go @@ -0,0 +1,56 @@ +// +build !go1.10 + +package sdkmath + +import "math" + +// Copied from the Go standard library's (Go 1.12) math/floor.go for use in +// Go versions prior to Go 1.10.
+const ( + uvone = 0x3FF0000000000000 + mask = 0x7FF + shift = 64 - 11 - 1 + bias = 1023 + signMask = 1 << 63 + fracMask = 1<<shift - 1 +) + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + // Round is a faster implementation of: + // + // func Round(x float64) float64 { + // t := Trunc(x) + // if Abs(x-t) >= 0.5 { + // return t + Copysign(1, x) + // } + // return t + // } + bits := math.Float64bits(x) + e := uint(bits>>shift) & mask + if e < bias { + // Round abs(x) < 1 including denormals. + bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1). + // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index de021367da2..74e361e070d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -146,6 +146,9 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) { } func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + if len(headers) == 0 { + return nil + } switch r.Interface().(type) { case map[string]*string: // we only support string map value types out := map[string]*string{} @@ -155,19 +158,28 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err out[k[len(prefix):]] = &v[0] } } - r.Set(reflect.ValueOf(out)) + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + } return nil } func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { - isJSONValue := tag.Get("type") == "jsonvalue" - if isJSONValue { + switch tag.Get("type") { + case "jsonvalue": if len(header) == 0 { return nil } - } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } } switch v.Interface().(type) { @@ -178,7 +190,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro if err != nil { return err } - v.Set(reflect.ValueOf(&b)) + v.Set(reflect.ValueOf(b)) case *bool: b, err := strconv.ParseBool(header) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index cf569645dc2..07a6187ea62 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -39,7 +39,7 @@ func Build(r *request.Request) { r.Error = awserr.NewRequestFailure( awserr.New(request.ErrCodeSerialization, "failed to encode rest XML request", err), - r.HTTPResponse.StatusCode, + 0, r.RequestID, ) return diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index b7ed6c6f810..05d4ff51925 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -1,8 +1,11 @@ package protocol import ( + "math" "strconv" "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" ) // Names of time formats supported by the SDK @@ -13,12 +16,19 @@
const ( ) // Time formats supported by the SDK +// Output time is intended to not contain decimals const ( // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + // This format is used for output time without seconds precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05Z" + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + + // This format is used for output time without seconds precision + ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" ) // IsKnownTimestampFormat returns if the timestamp format name @@ -42,9 +52,9 @@ func FormatTime(name string, t time.Time) string { switch name { case RFC822TimeFormatName: - return t.Format(RFC822TimeFormat) + return t.Format(RFC822OutputTimeFormat) case ISO8601TimeFormatName: - return t.Format(ISO8601TimeFormat) + return t.Format(ISO8601OutputTimeFormat) case UnixTimeFormatName: return strconv.FormatInt(t.Unix(), 10) default: @@ -62,10 +72,12 @@ func ParseTime(formatName, value string) (time.Time, error) { return time.Parse(ISO8601TimeFormat, value) case UnixTimeFormatName: v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 if err != nil { return time.Time{}, err } - return time.Unix(int64(v), 0), nil + return time.Unix(int64(v), int64(dec*(1e9))), nil default: panic("unknown timestamp format name, " + formatName) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go index d27d68b66a1..83cc7565b92 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go @@ -815,6 +815,12 @@ func (c *ApplicationAutoScaling) DescribeScheduledActionsRequest(input *Describe Name: opDescribeScheduledActions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -881,6 +887,56 @@ func (c *ApplicationAutoScaling) DescribeScheduledActionsWithContext(ctx aws.Con return out, req.Send() } +// DescribeScheduledActionsPages iterates over the pages of a DescribeScheduledActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeScheduledActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeScheduledActions operation. 
+// pageNum := 0 +// err := client.DescribeScheduledActionsPages(params, +// func(page *applicationautoscaling.DescribeScheduledActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ApplicationAutoScaling) DescribeScheduledActionsPages(input *DescribeScheduledActionsInput, fn func(*DescribeScheduledActionsOutput, bool) bool) error { + return c.DescribeScheduledActionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeScheduledActionsPagesWithContext same as DescribeScheduledActionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationAutoScaling) DescribeScheduledActionsPagesWithContext(ctx aws.Context, input *DescribeScheduledActionsInput, fn func(*DescribeScheduledActionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeScheduledActionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeScheduledActionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeScheduledActionsOutput), !p.HasNextPage()) + } + return p.Err() +} + const opPutScalingPolicy = "PutScalingPolicy" // PutScalingPolicyRequest generates a "aws/request.Request" representing the @@ -3315,6 +3371,26 @@ type RegisterScalableTargetInput struct { // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // An embedded object that contains attributes and attribute values that are + // used to suspend and resume automatic scaling. Setting the value of an attribute + // to true suspends the specified scaling activities. Setting it to false (default) + // resumes the specified scaling activities. + // + // Suspension Outcomes + // + // * For DynamicScalingInSuspended, while a suspension is in effect, all + // scale-in activities that are triggered by a scaling policy are suspended. + // + // * For DynamicScalingOutSuspended, while a suspension is in effect, all + // scale-out activities that are triggered by a scaling policy are suspended. + // + // * For ScheduledScalingSuspended, while a suspension is in effect, all + // scaling activities that involve scheduled actions are suspended. + // + // For more information, see Suspend and Resume Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-suspend-resume-scaling.html) + // in the Application Auto Scaling User Guide. + SuspendedState *SuspendedState `type:"structure"` } // String returns the string representation @@ -3388,6 +3464,12 @@ func (s *RegisterScalableTargetInput) SetServiceNamespace(v string) *RegisterSca return s } +// SetSuspendedState sets the SuspendedState field's value. 
+func (s *RegisterScalableTargetInput) SetSuspendedState(v *SuspendedState) *RegisterScalableTargetInput { + s.SuspendedState = v + return s +} + type RegisterScalableTargetOutput struct { _ struct{} `type:"structure"` } @@ -3508,6 +3590,10 @@ type ScalableTarget struct { // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // Specifies whether the scaling activities for a scalable target are in a suspended + // state. + SuspendedState *SuspendedState `type:"structure"` } // String returns the string representation @@ -3562,6 +3648,12 @@ func (s *ScalableTarget) SetServiceNamespace(v string) *ScalableTarget { return s } +// SetSuspendedState sets the SuspendedState field's value. +func (s *ScalableTarget) SetSuspendedState(v *SuspendedState) *ScalableTarget { + s.SuspendedState = v + return s +} + // Represents the minimum and maximum capacity for a scheduled action. type ScalableTargetAction struct { _ struct{} `type:"structure"` @@ -4390,6 +4482,55 @@ func (s *StepScalingPolicyConfiguration) SetStepAdjustments(v []*StepAdjustment) return s } +// Specifies whether the scaling activities for a scalable target are in a suspended +// state. +type SuspendedState struct { + _ struct{} `type:"structure"` + + // Whether scale in by a target tracking scaling policy or a step scaling policy + // is suspended. Set the value to true if you don't want Application Auto Scaling + // to remove capacity when a scaling policy is triggered. The default is false. + DynamicScalingInSuspended *bool `type:"boolean"` + + // Whether scale out by a target tracking scaling policy or a step scaling policy + // is suspended. Set the value to true if you don't want Application Auto Scaling + // to add capacity when a scaling policy is triggered. The default is false. + DynamicScalingOutSuspended *bool `type:"boolean"` + + // Whether scheduled scaling is suspended. Set the value to true if you don't + // want Application Auto Scaling to add or remove capacity by initiating scheduled + // actions. The default is false. + ScheduledScalingSuspended *bool `type:"boolean"` +} + +// String returns the string representation +func (s SuspendedState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuspendedState) GoString() string { + return s.String() +} + +// SetDynamicScalingInSuspended sets the DynamicScalingInSuspended field's value. +func (s *SuspendedState) SetDynamicScalingInSuspended(v bool) *SuspendedState { + s.DynamicScalingInSuspended = &v + return s +} + +// SetDynamicScalingOutSuspended sets the DynamicScalingOutSuspended field's value. +func (s *SuspendedState) SetDynamicScalingOutSuspended(v bool) *SuspendedState { + s.DynamicScalingOutSuspended = &v + return s +} + +// SetScheduledScalingSuspended sets the ScheduledScalingSuspended field's value. +func (s *SuspendedState) SetScheduledScalingSuspended(v bool) *SuspendedState { + s.ScheduledScalingSuspended = &v + return s +} + // Represents a target tracking scaling policy configuration to use with Application // Auto Scaling. 
type TargetTrackingScalingPolicyConfiguration struct { diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go index 561d28cee2d..a5165752810 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go @@ -97,6 +97,9 @@ type ApplicationAutoScalingAPI interface { DescribeScheduledActionsWithContext(aws.Context, *applicationautoscaling.DescribeScheduledActionsInput, ...request.Option) (*applicationautoscaling.DescribeScheduledActionsOutput, error) DescribeScheduledActionsRequest(*applicationautoscaling.DescribeScheduledActionsInput) (*request.Request, *applicationautoscaling.DescribeScheduledActionsOutput) + DescribeScheduledActionsPages(*applicationautoscaling.DescribeScheduledActionsInput, func(*applicationautoscaling.DescribeScheduledActionsOutput, bool) bool) error + DescribeScheduledActionsPagesWithContext(aws.Context, *applicationautoscaling.DescribeScheduledActionsInput, func(*applicationautoscaling.DescribeScheduledActionsOutput, bool) bool, ...request.Option) error + PutScalingPolicy(*applicationautoscaling.PutScalingPolicyInput) (*applicationautoscaling.PutScalingPolicyOutput, error) PutScalingPolicyWithContext(aws.Context, *applicationautoscaling.PutScalingPolicyInput, ...request.Option) (*applicationautoscaling.PutScalingPolicyOutput, error) PutScalingPolicyRequest(*applicationautoscaling.PutScalingPolicyInput) (*request.Request, *applicationautoscaling.PutScalingPolicyOutput) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go index 53002308c4d..fbaa9db4693 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go @@ -24,7 +24,7 @@ // // API Summary // -// The Application Auto Scaling service API includes two key sets of actions: +// The Application Auto Scaling service API includes three key sets of actions: // // * Register and manage scalable targets - Register AWS or custom resources // as scalable targets (a resource that Application Auto Scaling can scale), @@ -36,6 +36,12 @@ // one-time or recurring scaling actions, and retrieve your recent scaling // activity history. // +// * Suspend and resume scaling - Temporarily suspend and later resume automatic +// scaling by calling the RegisterScalableTarget action for any Application +// Auto Scaling scalable target. You can suspend and resume, individually +// or in combination, scale-out activities triggered by a scaling policy, +// scale-in activities triggered by a scaling policy, and scheduled scaling. +// // To learn more about Application Auto Scaling, including information about // granting IAM users required permissions for Application Auto Scaling actions, // see the Application Auto Scaling User Guide (https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html). 
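Reviewer's note on the applicationautoscaling changes above: the regenerated API adds DescribeScheduledActionsPages pagination and the SuspendedState structure backing the new suspend/resume support. A minimal sketch of how a caller might exercise both, assuming region and credentials come from the environment; the ECS cluster and service names are hypothetical, the types and setters come from the generated code in this diff, and import paths use the SDK's canonical github.com module path:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/applicationautoscaling"
)

func main() {
	// Region and credentials are read from the environment.
	sess := session.Must(session.NewSession())
	svc := applicationautoscaling.New(sess)

	// Suspend dynamic scaling for a hypothetical ECS service while leaving
	// scheduled scaling active, via the new SuspendedState field.
	_, err := svc.RegisterScalableTarget(&applicationautoscaling.RegisterScalableTargetInput{
		ServiceNamespace:  aws.String("ecs"),
		ResourceId:        aws.String("service/demo-cluster/demo-service"),
		ScalableDimension: aws.String("ecs:service:DesiredCount"),
		SuspendedState: &applicationautoscaling.SuspendedState{
			DynamicScalingInSuspended:  aws.Bool(true),
			DynamicScalingOutSuspended: aws.Bool(true),
			ScheduledScalingSuspended:  aws.Bool(false),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The new paginator walks DescribeScheduledActions results page by page.
	err = svc.DescribeScheduledActionsPages(
		&applicationautoscaling.DescribeScheduledActionsInput{
			ServiceNamespace: aws.String("ecs"),
		},
		func(page *applicationautoscaling.DescribeScheduledActionsOutput, lastPage bool) bool {
			fmt.Println(page)
			return true // keep paging until the last page
		})
	if err != nil {
		log.Fatal(err)
	}
}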
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 0385cb15b4f..4b4802db0ce 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -12685,6 +12685,80 @@ func (c *EC2) DescribeElasticGpusWithContext(ctx aws.Context, input *DescribeEla return out, req.Send() } +const opDescribeExportImageTasks = "DescribeExportImageTasks" + +// DescribeExportImageTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportImageTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeExportImageTasks for more information on using the DescribeExportImageTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeExportImageTasksRequest method. +// req, resp := client.DescribeExportImageTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportImageTasks +func (c *EC2) DescribeExportImageTasksRequest(input *DescribeExportImageTasksInput) (req *request.Request, output *DescribeExportImageTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportImageTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportImageTasksInput{} + } + + output = &DescribeExportImageTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExportImageTasks API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified export image tasks or all your export image tasks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeExportImageTasks for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportImageTasks +func (c *EC2) DescribeExportImageTasks(input *DescribeExportImageTasksInput) (*DescribeExportImageTasksOutput, error) { + req, out := c.DescribeExportImageTasksRequest(input) + return out, req.Send() +} + +// DescribeExportImageTasksWithContext is the same as DescribeExportImageTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExportImageTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeExportImageTasksWithContext(ctx aws.Context, input *DescribeExportImageTasksInput, opts ...request.Option) (*DescribeExportImageTasksOutput, error) { + req, out := c.DescribeExportImageTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeExportTasks = "DescribeExportTasks" // DescribeExportTasksRequest generates a "aws/request.Request" representing the @@ -12729,7 +12803,8 @@ func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req * // DescribeExportTasks API operation for Amazon Elastic Compute Cloud. // -// Describes the specified export tasks or all your export tasks. +// Describes the specified export instance tasks or all your export instance +// tasks. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -24001,6 +24076,82 @@ func (c *EC2) ExportClientVpnClientConfigurationWithContext(ctx aws.Context, inp return out, req.Send() } +const opExportImage = "ExportImage" + +// ExportImageRequest generates a "aws/request.Request" representing the +// client's request for the ExportImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportImage for more information on using the ExportImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExportImageRequest method. +// req, resp := client.ExportImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportImage +func (c *EC2) ExportImageRequest(input *ExportImageInput) (req *request.Request, output *ExportImageOutput) { + op := &request.Operation{ + Name: opExportImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportImageInput{} + } + + output = &ExportImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportImage API operation for Amazon Elastic Compute Cloud. +// +// Exports an Amazon Machine Image (AMI) to a VM file. For more information, +// see Exporting a VM Directory from an Amazon Machine Image (AMI) (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport_image.html) +// in the VM Import/Export User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ExportImage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportImage +func (c *EC2) ExportImage(input *ExportImageInput) (*ExportImageOutput, error) { + req, out := c.ExportImageRequest(input) + return out, req.Send() +} + +// ExportImageWithContext is the same as ExportImage with the addition of +// the ability to pass a context and additional request options. 
+// +// See ExportImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ExportImageWithContext(ctx aws.Context, input *ExportImageInput, opts ...request.Option) (*ExportImageOutput, error) { + req, out := c.ExportImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opExportTransitGatewayRoutes = "ExportTransitGatewayRoutes" // ExportTransitGatewayRoutesRequest generates a "aws/request.Request" representing the @@ -24121,6 +24272,12 @@ func (c *EC2) GetCapacityReservationUsageRequest(input *GetCapacityReservationUs // GetCapacityReservationUsage API operation for Amazon Elastic Compute Cloud. // +// Gets usage information about a Capacity Reservation. If the Capacity Reservation +// is shared, it shows usage information for the Capacity Reservation owner +// and each AWS account that is currently using the shared capacity. If the +// Capacity Reservation is not shared, it shows only the Capacity Reservation +// owner's usage. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -25943,8 +26100,35 @@ func (c *EC2) ModifyFleetRequest(input *ModifyFleetInput) (req *request.Request, // // Modifies the specified EC2 Fleet. // +// You can only modify an EC2 Fleet request of type maintain. +// // While the EC2 Fleet is being modified, it is in the modifying state. // +// To scale up your EC2 Fleet, increase its target capacity. The EC2 Fleet launches +// the additional Spot Instances according to the allocation strategy for the +// EC2 Fleet request. If the allocation strategy is lowestPrice, the EC2 Fleet +// launches instances using the Spot Instance pool with the lowest price. If +// the allocation strategy is diversified, the EC2 Fleet distributes the instances +// across the Spot Instance pools. If the allocation strategy is capacityOptimized, +// EC2 Fleet launches instances from Spot Instance pools with optimal capacity +// for the number of instances that are launching. +// +// To scale down your EC2 Fleet, decrease its target capacity. First, the EC2 +// Fleet cancels any open requests that exceed the new target capacity. You +// can request that the EC2 Fleet terminate Spot Instances until the size of +// the fleet no longer exceeds the new target capacity. If the allocation strategy +// is lowestPrice, the EC2 Fleet terminates the instances with the highest price +// per unit. If the allocation strategy is capacityOptimized, the EC2 Fleet +// terminates the instances in the Spot Instance pools that have the least available +// Spot Instance capacity. If the allocation strategy is diversified, the EC2 +// Fleet terminates instances across the Spot Instance pools. Alternatively, +// you can request that the EC2 Fleet keep the fleet at its current size, but +// not replace any Spot Instances that are interrupted or that you terminate +// manually. +// +// If you are finished with your EC2 Fleet for now, but will use it again later, +// you can set the target capacity to 0. +// // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -27097,6 +27281,7 @@ func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput // or remove specified AWS account IDs from a snapshot's list of create volume // permissions, but you cannot do both in a single operation. If you need to // both add and remove account IDs for a snapshot, you must use multiple operations. +// You can make up to 500 modifications to a snapshot in a single operation. // // Encrypted snapshots and snapshots with AWS Marketplace product codes cannot // be made public. Snapshots encrypted with your default CMK cannot be shared @@ -27187,19 +27372,24 @@ func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) // To scale up your Spot Fleet, increase its target capacity. The Spot Fleet // launches the additional Spot Instances according to the allocation strategy // for the Spot Fleet request. If the allocation strategy is lowestPrice, the -// Spot Fleet launches instances using the Spot pool with the lowest price. -// If the allocation strategy is diversified, the Spot Fleet distributes the -// instances across the Spot pools. +// Spot Fleet launches instances using the Spot Instance pool with the lowest +// price. If the allocation strategy is diversified, the Spot Fleet distributes +// the instances across the Spot Instance pools. If the allocation strategy +// is capacityOptimized, Spot Fleet launches instances from Spot Instance pools +// with optimal capacity for the number of instances that are launching. // // To scale down your Spot Fleet, decrease its target capacity. First, the Spot // Fleet cancels any open requests that exceed the new target capacity. You // can request that the Spot Fleet terminate Spot Instances until the size of // the fleet no longer exceeds the new target capacity. If the allocation strategy // is lowestPrice, the Spot Fleet terminates the instances with the highest -// price per unit. If the allocation strategy is diversified, the Spot Fleet -// terminates instances across the Spot pools. Alternatively, you can request -// that the Spot Fleet keep the fleet at its current size, but not replace any -// Spot Instances that are interrupted or that you terminate manually. +// price per unit. If the allocation strategy is capacityOptimized, the Spot +// Fleet terminates the instances in the Spot Instance pools that have the least +// available Spot Instance capacity. If the allocation strategy is diversified, +// the Spot Fleet terminates instances across the Spot Instance pools. Alternatively, +// you can request that the Spot Fleet keep the fleet at its current size, but +// not replace any Spot Instances that are interrupted or that you terminate +// manually. // // If you are finished with your Spot Fleet for now, but will use it again later, // you can set the target capacity to 0. @@ -28484,6 +28674,80 @@ func (c *EC2) ModifyVpnConnectionWithContext(ctx aws.Context, input *ModifyVpnCo return out, req.Send() } +const opModifyVpnTunnelCertificate = "ModifyVpnTunnelCertificate" + +// ModifyVpnTunnelCertificateRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpnTunnelCertificate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpnTunnelCertificate for more information on using the ModifyVpnTunnelCertificate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpnTunnelCertificateRequest method. +// req, resp := client.ModifyVpnTunnelCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelCertificate +func (c *EC2) ModifyVpnTunnelCertificateRequest(input *ModifyVpnTunnelCertificateInput) (req *request.Request, output *ModifyVpnTunnelCertificateOutput) { + op := &request.Operation{ + Name: opModifyVpnTunnelCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpnTunnelCertificateInput{} + } + + output = &ModifyVpnTunnelCertificateOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpnTunnelCertificate API operation for Amazon Elastic Compute Cloud. +// +// Modifies the VPN tunnel endpoint certificate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpnTunnelCertificate for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelCertificate +func (c *EC2) ModifyVpnTunnelCertificate(input *ModifyVpnTunnelCertificateInput) (*ModifyVpnTunnelCertificateOutput, error) { + req, out := c.ModifyVpnTunnelCertificateRequest(input) + return out, req.Send() +} + +// ModifyVpnTunnelCertificateWithContext is the same as ModifyVpnTunnelCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpnTunnelCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpnTunnelCertificateWithContext(ctx aws.Context, input *ModifyVpnTunnelCertificateInput, opts ...request.Option) (*ModifyVpnTunnelCertificateOutput, error) { + req, out := c.ModifyVpnTunnelCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opMonitorInstances = "MonitorInstances" // MonitorInstancesRequest generates a "aws/request.Request" representing the @@ -30179,10 +30443,10 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques // You can submit a single request that includes multiple launch specifications // that vary by instance type, AMI, Availability Zone, or subnet. // -// By default, the Spot Fleet requests Spot Instances in the Spot pool where -// the price per unit is the lowest. Each launch specification can include its -// own instance weighting that reflects the value of the instance type to your -// application workload. 
+// By default, the Spot Fleet requests Spot Instances in the Spot Instance pool +// where the price per unit is the lowest. Each launch specification can include +// its own instance weighting that reflects the value of the instance type to +// your application workload. // // Alternatively, you can specify that the Spot Fleet distribute the target // capacity across the Spot pools included in its launch specifications. By @@ -31384,6 +31648,98 @@ func (c *EC2) SearchTransitGatewayRoutesWithContext(ctx aws.Context, input *Sear return out, req.Send() } +const opSendDiagnosticInterrupt = "SendDiagnosticInterrupt" + +// SendDiagnosticInterruptRequest generates a "aws/request.Request" representing the +// client's request for the SendDiagnosticInterrupt operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendDiagnosticInterrupt for more information on using the SendDiagnosticInterrupt +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendDiagnosticInterruptRequest method. +// req, resp := client.SendDiagnosticInterruptRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SendDiagnosticInterrupt +func (c *EC2) SendDiagnosticInterruptRequest(input *SendDiagnosticInterruptInput) (req *request.Request, output *SendDiagnosticInterruptOutput) { + op := &request.Operation{ + Name: opSendDiagnosticInterrupt, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendDiagnosticInterruptInput{} + } + + output = &SendDiagnosticInterruptOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SendDiagnosticInterrupt API operation for Amazon Elastic Compute Cloud. +// +// Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger +// a kernel panic (on Linux instances), or a blue screen/stop error (on Windows +// instances). For instances based on Intel and AMD processors, the interrupt +// is received as a non-maskable interrupt (NMI). +// +// In general, the operating system crashes and reboots when a kernel panic +// or stop error is triggered. The operating system can also be configured to +// perform diagnostic tasks, such as generating a memory dump file, loading +// a secondary kernel, or obtaining a call trace. +// +// Before sending a diagnostic interrupt to your instance, ensure that its operating +// system is configured to perform the required diagnostic tasks. +// +// For more information about configuring your operating system to generate +// a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic +// Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) +// (Linux instances) or Send a Diagnostic Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/diagnostic-interrupt.html) +// (Windows instances). +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation SendDiagnosticInterrupt for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SendDiagnosticInterrupt +func (c *EC2) SendDiagnosticInterrupt(input *SendDiagnosticInterruptInput) (*SendDiagnosticInterruptOutput, error) { + req, out := c.SendDiagnosticInterruptRequest(input) + return out, req.Send() +} + +// SendDiagnosticInterruptWithContext is the same as SendDiagnosticInterrupt with the addition of +// the ability to pass a context and additional request options. +// +// See SendDiagnosticInterrupt for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) SendDiagnosticInterruptWithContext(ctx aws.Context, input *SendDiagnosticInterruptInput, opts ...request.Option) (*SendDiagnosticInterruptOutput, error) { + req, out := c.SendDiagnosticInterruptRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartInstances = "StartInstances" // StartInstancesRequest generates a "aws/request.Request" representing the @@ -36457,12 +36813,14 @@ type CapacityReservation struct { // The Availability Zone in which the capacity is reserved. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + // The Availability Zone ID of the Capacity Reservation. AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` // The remaining capacity. Indicates the number of instances that can be launched // in the Capacity Reservation. AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + // The Amazon Resource Name (ARN) of the Capacity Reservation. CapacityReservationArn *string `locationName:"capacityReservationArn" type:"string"` // The ID of the Capacity Reservation. @@ -36519,6 +36877,7 @@ type CapacityReservation struct { // The type of instance for which the Capacity Reservation reserves capacity. InstanceType *string `locationName:"instanceType" type:"string"` + // The ID of the AWS account that owns the Capacity Reservation. OwnerId *string `locationName:"ownerId" type:"string"` // The current state of the Capacity Reservation. A Capacity Reservation can @@ -36527,11 +36886,11 @@ type CapacityReservation struct { // * active - The Capacity Reservation is active and the capacity is available // for your use. // - // * cancelled - The Capacity Reservation expired automatically at the date + // * expired - The Capacity Reservation expired automatically at the date // and time specified in your request. The reserved capacity is no longer // available for your use. // - // * expired - The Capacity Reservation was manually cancelled. The reserved + // * cancelled - The Capacity Reservation was manually cancelled. The reserved // capacity is no longer available for your use. // // * pending - The Capacity Reservation request was successful but the capacity @@ -36555,7 +36914,8 @@ type CapacityReservation struct { // that is dedicated to a single AWS account. 
Tenancy *string `locationName:"tenancy" type:"string" enum:"CapacityReservationTenancy"` - // The number of instances for which the Capacity Reservation reserves capacity. + // The total number of instances for which the Capacity Reservation reserves + // capacity. TotalInstanceCount *int64 `locationName:"totalInstanceCount" type:"integer"` } @@ -37515,8 +37875,7 @@ type ClientVpnEndpoint struct { // The ARN of the server certificate. ServerCertificateArn *string `locationName:"serverCertificateArn" type:"string"` - // Indicates whether split-tunnel is enabled in the AWS Client VPN endpoint - // endpoint. + // Indicates whether split-tunnel is enabled in the AWS Client VPN endpoint. // // For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client // VPN Endpoint (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) @@ -38649,9 +39008,8 @@ type CpuOptionsRequest struct { // The number of CPU cores for the instance. CoreCount *int64 `type:"integer"` - // The number of threads per CPU core. To disable Intel Hyper-Threading Technology - // for the instance, specify a value of 1. Otherwise, specify the default value - // of 2. + // The number of threads per CPU core. To disable multithreading for the instance, + // specify a value of 1. Otherwise, specify the default value of 2. ThreadsPerCore *int64 `type:"integer"` } @@ -38683,6 +39041,7 @@ type CreateCapacityReservationInput struct { // The Availability Zone in which to create the Capacity Reservation. AvailabilityZone *string `type:"string"` + // The ID of the Availability Zone in which to create the Capacity Reservation. AvailabilityZoneId *string `type:"string"` // Unique, case-sensitive identifier that you provide to ensure the idempotency @@ -38958,8 +39317,7 @@ type CreateClientVpnEndpointInput struct { // Information about the DNS servers to be used for DNS resolution. A Client // VPN endpoint can have up to two DNS servers. If no DNS server is specified, - // the DNS address of the VPC that is to be associated with Client VPN endpoint - // is used as the DNS server. + // the DNS address configured on the device is used for the DNS server. DnsServers []*string `locationNameList:"item" type:"list"` // Checks whether you have the required permissions for the action, without @@ -38974,8 +39332,7 @@ type CreateClientVpnEndpointInput struct { // ServerCertificateArn is a required field ServerCertificateArn *string `type:"string" required:"true"` - // Indicates whether split-tunnel is enabled on the AWS Client VPN endpoint - // endpoint. + // Indicates whether split-tunnel is enabled on the AWS Client VPN endpoint. // // By default, split-tunnel on a VPN endpoint is disabled. // @@ -39274,6 +39631,9 @@ type CreateCustomerGatewayInput struct { // BgpAsn is a required field BgpAsn *int64 `type:"integer" required:"true"` + // The Amazon Resource Name (ARN) for the customer gateway certificate. + CertificateArn *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -39282,9 +39642,7 @@ type CreateCustomerGatewayInput struct { // The Internet-routable IP address for the customer gateway's outside interface. // The address must be static. 
- // - // PublicIp is a required field - PublicIp *string `locationName:"IpAddress" type:"string" required:"true"` + PublicIp *string `locationName:"IpAddress" type:"string"` // The type of VPN connection that this customer gateway supports (ipsec.1). // @@ -39308,9 +39666,6 @@ func (s *CreateCustomerGatewayInput) Validate() error { if s.BgpAsn == nil { invalidParams.Add(request.NewErrParamRequired("BgpAsn")) } - if s.PublicIp == nil { - invalidParams.Add(request.NewErrParamRequired("PublicIp")) - } if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } @@ -39327,6 +39682,12 @@ func (s *CreateCustomerGatewayInput) SetBgpAsn(v int64) *CreateCustomerGatewayIn return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *CreateCustomerGatewayInput) SetCertificateArn(v string) *CreateCustomerGatewayInput { + s.CertificateArn = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *CreateCustomerGatewayInput) SetDryRun(v bool) *CreateCustomerGatewayInput { s.DryRun = &v @@ -40835,7 +41196,9 @@ type CreateLaunchTemplateVersionInput struct { // The version number of the launch template version on which to base the new // version. The new version inherits the same launch parameters as the source - // version, except for parameters that you specify in LaunchTemplateData. + // version, except for parameters that you specify in LaunchTemplateData. Snapshots + // applied to the block device mapping are ignored when creating a new version + // unless they are explicitly included. SourceVersion *string `type:"string"` // A description for the version of the launch template. @@ -43607,10 +43970,7 @@ type CreateVolumeInput struct { // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard // for Magnetic volumes. // - // Defaults: If no volume type is specified, the default is standard in us-east-1, - // eu-west-1, eu-central-1, us-west-2, us-west-1, sa-east-1, ap-northeast-1, - // ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-south-1, us-gov-west-1, - // and cn-north-1. In all other Regions, EBS defaults to gp2. + // Default: gp2 VolumeType *string `type:"string" enum:"VolumeType"` } @@ -44718,6 +45078,9 @@ type CustomerGateway struct { // (ASN). BgpAsn *string `locationName:"bgpAsn" type:"string"` + // The Amazon Resource Name (ARN) for the customer gateway certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + // The ID of the customer gateway. CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` @@ -44751,6 +45114,12 @@ func (s *CustomerGateway) SetBgpAsn(v string) *CustomerGateway { return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *CustomerGateway) SetCertificateArn(v string) *CustomerGateway { + s.CertificateArn = &v + return s +} + // SetCustomerGatewayId sets the CustomerGatewayId field's value. func (s *CustomerGateway) SetCustomerGatewayId(v string) *CustomerGateway { s.CustomerGatewayId = &v @@ -49997,6 +50366,115 @@ func (s *DescribeElasticGpusOutput) SetNextToken(v string) *DescribeElasticGpusO return s } +type DescribeExportImageTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the export image tasks. 
+ ExportImageTaskIds []*string `locationName:"ExportImageTaskId" locationNameList:"ExportImageTaskId" type:"list"` + + // Filter tasks using the task-state filter and one of the following values: + // active, completed, deleting, or deleted. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `min:"1" type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeExportImageTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportImageTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExportImageTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportImageTasksInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeExportImageTasksInput) SetDryRun(v bool) *DescribeExportImageTasksInput { + s.DryRun = &v + return s +} + +// SetExportImageTaskIds sets the ExportImageTaskIds field's value. +func (s *DescribeExportImageTasksInput) SetExportImageTaskIds(v []*string) *DescribeExportImageTasksInput { + s.ExportImageTaskIds = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeExportImageTasksInput) SetFilters(v []*Filter) *DescribeExportImageTasksInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeExportImageTasksInput) SetMaxResults(v int64) *DescribeExportImageTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportImageTasksInput) SetNextToken(v string) *DescribeExportImageTasksInput { + s.NextToken = &v + return s +} + +type DescribeExportImageTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the export image tasks. + ExportImageTasks []*ExportImageTask `locationName:"exportImageTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeExportImageTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportImageTasksOutput) GoString() string { + return s.String() +} + +// SetExportImageTasks sets the ExportImageTasks field's value. +func (s *DescribeExportImageTasksOutput) SetExportImageTasks(v []*ExportImageTask) *DescribeExportImageTasksOutput { + s.ExportImageTasks = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportImageTasksOutput) SetNextToken(v string) *DescribeExportImageTasksOutput { + s.NextToken = &v + return s +} + type DescribeExportTasksInput struct { _ struct{} `type:"structure"` @@ -51807,14 +52285,13 @@ type DescribeImportImageTasksInput struct { DryRun *bool `type:"boolean"` // Filter tasks using the task-state filter and one of the following values: - // active, completed, deleting, deleted. 
+ // active, completed, deleting, or deleted. Filters []*Filter `locationNameList:"Filter" type:"list"` - // A list of import image task IDs. + // The IDs of the import image tasks. ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` - // The maximum number of results to return in a single call. To retrieve the - // remaining results, make another call with the returned NextToken value. + // The maximum number of results to return in a single call. MaxResults *int64 `type:"integer"` // A token that indicates the next page of results. @@ -54549,6 +55026,9 @@ type DescribeRegionsInput struct { // // * endpoint - The endpoint of the Region (for example, ec2.us-east-1.amazonaws.com). // + // * opt-in-status - The opt-in status of the Region (opt-in-not-required + // | opted-in | not-opted-in). + // // * region-name - The name of the Region (for example, us-east-1). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -60360,6 +60840,19 @@ type DetachNetworkInterfaceInput struct { DryRun *bool `locationName:"dryRun" type:"boolean"` // Specifies whether to force a detachment. + // + // * Use the Force parameter only as a last resort to detach a network interface + // from a failed instance. + // + // * If you use the Force parameter to detach a network interface, you might + // not be able to attach a different network interface to the same index + // on the instance without first stopping and starting the instance. + // + // * If you force the detachment of a network interface, the instance metadata + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + // might not get updated. This means that the attributes associated with + // the detached network interface might still be visible. The instance metadata + // will get updated when you stop and start the instance. Force *bool `locationName:"force" type:"boolean"` } @@ -61909,9 +62402,10 @@ type EbsBlockDevice struct { // size. VolumeSize *int64 `locationName:"volumeSize" type:"integer"` - // The volume type. If you set the type to io1, you must also set the Iops property. + // The volume type. If you set the type to io1, you must also specify the IOPS + // that the volume supports. // - // Default: standard + // Default: gp2 VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` } @@ -63035,6 +63529,295 @@ func (s *ExportClientVpnClientConfigurationOutput) SetClientConfiguration(v stri return s } +type ExportImageInput struct { + _ struct{} `type:"structure"` + + // Token to enable idempotency for export image requests. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // A description of the image being exported. The maximum length is 255 bytes. + Description *string `type:"string"` + + // The disk image format. + // + // DiskImageFormat is a required field + DiskImageFormat *string `type:"string" required:"true" enum:"DiskImageFormat"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the image. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` + + // The name of the role that grants VM Import/Export permission to export images + // to your S3 bucket. 
If this parameter is not specified, the default role is + // named 'vmimport'. + RoleName *string `type:"string"` + + // Information about the destination S3 bucket. The bucket must exist and grant + // WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // + // S3ExportLocation is a required field + S3ExportLocation *ExportTaskS3LocationRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ExportImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportImageInput"} + if s.DiskImageFormat == nil { + invalidParams.Add(request.NewErrParamRequired("DiskImageFormat")) + } + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.S3ExportLocation == nil { + invalidParams.Add(request.NewErrParamRequired("S3ExportLocation")) + } + if s.S3ExportLocation != nil { + if err := s.S3ExportLocation.Validate(); err != nil { + invalidParams.AddNested("S3ExportLocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportImageInput) SetClientToken(v string) *ExportImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ExportImageInput) SetDescription(v string) *ExportImageInput { + s.Description = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. +func (s *ExportImageInput) SetDiskImageFormat(v string) *ExportImageInput { + s.DiskImageFormat = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ExportImageInput) SetDryRun(v bool) *ExportImageInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageInput) SetImageId(v string) *ExportImageInput { + s.ImageId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ExportImageInput) SetRoleName(v string) *ExportImageInput { + s.RoleName = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageInput) SetS3ExportLocation(v *ExportTaskS3LocationRequest) *ExportImageInput { + s.S3ExportLocation = v + return s +} + +type ExportImageOutput struct { + _ struct{} `type:"structure"` + + // A description of the image being exported. + Description *string `locationName:"description" type:"string"` + + // The disk image format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The ID of the export image task. + ExportImageTaskId *string `locationName:"exportImageTaskId" type:"string"` + + // The ID of the image. + ImageId *string `locationName:"imageId" type:"string"` + + // The percent complete of the export image task. + Progress *string `locationName:"progress" type:"string"` + + // The name of the role that grants VM Import/Export permission to export images + // to your S3 bucket. + RoleName *string `locationName:"roleName" type:"string"` + + // Information about the destination S3 bucket. 
+ S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` + + // The status of the export image task. The possible values are active, completed, + // deleting, and deleted. + Status *string `locationName:"status" type:"string"` + + // The status message for the export image task. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ExportImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ExportImageOutput) SetDescription(v string) *ExportImageOutput { + s.Description = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. +func (s *ExportImageOutput) SetDiskImageFormat(v string) *ExportImageOutput { + s.DiskImageFormat = &v + return s +} + +// SetExportImageTaskId sets the ExportImageTaskId field's value. +func (s *ExportImageOutput) SetExportImageTaskId(v string) *ExportImageOutput { + s.ExportImageTaskId = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageOutput) SetImageId(v string) *ExportImageOutput { + s.ImageId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ExportImageOutput) SetProgress(v string) *ExportImageOutput { + s.Progress = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ExportImageOutput) SetRoleName(v string) *ExportImageOutput { + s.RoleName = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageOutput) SetS3ExportLocation(v *ExportTaskS3Location) *ExportImageOutput { + s.S3ExportLocation = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ExportImageOutput) SetStatus(v string) *ExportImageOutput { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ExportImageOutput) SetStatusMessage(v string) *ExportImageOutput { + s.StatusMessage = &v + return s +} + +// Describes an export image task. +type ExportImageTask struct { + _ struct{} `type:"structure"` + + // A description of the image being exported. + Description *string `locationName:"description" type:"string"` + + // The ID of the export image task. + ExportImageTaskId *string `locationName:"exportImageTaskId" type:"string"` + + // The ID of the image. + ImageId *string `locationName:"imageId" type:"string"` + + // The percent complete of the export image task. + Progress *string `locationName:"progress" type:"string"` + + // Information about the destination S3 bucket. + S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` + + // The status of the export image task. The possible values are active, completed, + // deleting, and deleted. + Status *string `locationName:"status" type:"string"` + + // The status message for the export image task. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ExportImageTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageTask) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. 
+func (s *ExportImageTask) SetDescription(v string) *ExportImageTask { + s.Description = &v + return s +} + +// SetExportImageTaskId sets the ExportImageTaskId field's value. +func (s *ExportImageTask) SetExportImageTaskId(v string) *ExportImageTask { + s.ExportImageTaskId = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageTask) SetImageId(v string) *ExportImageTask { + s.ImageId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ExportImageTask) SetProgress(v string) *ExportImageTask { + s.Progress = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageTask) SetS3ExportLocation(v *ExportTaskS3Location) *ExportImageTask { + s.S3ExportLocation = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ExportImageTask) SetStatus(v string) *ExportImageTask { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ExportImageTask) SetStatusMessage(v string) *ExportImageTask { + s.StatusMessage = &v + return s +} + // Describes an instance export task. type ExportTask struct { _ struct{} `type:"structure"` @@ -63104,6 +63887,87 @@ func (s *ExportTask) SetStatusMessage(v string) *ExportTask { return s } +// Describes the destination for an export image task. +type ExportTaskS3Location struct { + _ struct{} `type:"structure"` + + // The destination S3 bucket. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The prefix (logical hierarchy) in the bucket. + S3Prefix *string `locationName:"s3Prefix" type:"string"` +} + +// String returns the string representation +func (s ExportTaskS3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskS3Location) GoString() string { + return s.String() +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTaskS3Location) SetS3Bucket(v string) *ExportTaskS3Location { + s.S3Bucket = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportTaskS3Location) SetS3Prefix(v string) *ExportTaskS3Location { + s.S3Prefix = &v + return s +} + +// Describes the destination for an export image task. +type ExportTaskS3LocationRequest struct { + _ struct{} `type:"structure"` + + // The destination S3 bucket. + // + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The prefix (logical hierarchy) in the bucket. + S3Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ExportTaskS3LocationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskS3LocationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportTaskS3LocationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportTaskS3LocationRequest"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTaskS3LocationRequest) SetS3Bucket(v string) *ExportTaskS3LocationRequest { + s.S3Bucket = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. 
+func (s *ExportTaskS3LocationRequest) SetS3Prefix(v string) *ExportTaskS3LocationRequest { + s.S3Prefix = &v + return s +} + // Describes the format and location for an instance export task. type ExportToS3Task struct { _ struct{} `type:"structure"` @@ -64356,13 +65220,25 @@ func (s *FpgaImageState) SetMessage(v string) *FpgaImageState { type GetCapacityReservationUsageInput struct { _ struct{} `type:"structure"` + // The ID of the Capacity Reservation. + // // CapacityReservationId is a required field CapacityReservationId *string `type:"string" required:"true"` + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. + // + // Valid range: Minimum value of 1. Maximum value of 1000. MaxResults *int64 `min:"1" type:"integer"` + // The token to retrieve the next page of results. NextToken *string `type:"string"` } @@ -64419,18 +65295,45 @@ func (s *GetCapacityReservationUsageInput) SetNextToken(v string) *GetCapacityRe type GetCapacityReservationUsageOutput struct { _ struct{} `type:"structure"` + // The remaining capacity. Indicates the number of instances that can be launched + // in the Capacity Reservation. AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + // The ID of the Capacity Reservation. CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` + // The type of instance for which the Capacity Reservation reserves capacity. InstanceType *string `locationName:"instanceType" type:"string"` + // Information about the Capacity Reservation usage. InstanceUsages []*InstanceUsage `locationName:"instanceUsageSet" locationNameList:"item" type:"list"` + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` + // The current state of the Capacity Reservation. A Capacity Reservation can + // be in one of the following states: + // + // * active - The Capacity Reservation is active and the capacity is available + // for your use. + // + // * expired - The Capacity Reservation expired automatically at the date + // and time specified in your request. The reserved capacity is no longer + // available for your use. + // + // * cancelled - The Capacity Reservation was manually cancelled. The reserved + // capacity is no longer available for your use. + // + // * pending - The Capacity Reservation request was successful but the capacity + // provisioning is still pending. + // + // * failed - The Capacity Reservation request has failed. A request might + // fail due to invalid request parameters, capacity constraints, or instance + // limit constraints. Failed requests are retained for 60 minutes. State *string `locationName:"state" type:"string" enum:"CapacityReservationState"` + // The number of instances for which the Capacity Reservation reserves capacity. TotalInstanceCount *int64 `locationName:"totalInstanceCount" type:"integer"` } @@ -69240,8 +70143,8 @@ type InstanceNetworkInterfaceSpecification struct { // request. 
SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` - // The ID of the subnet associated with the network string. Applies only if - // creating a network interface when launching an instance. + // The ID of the subnet associated with the network interface. Applies only + // if creating a network interface when launching an instance. SubnetId *string `locationName:"subnetId" type:"string"` } @@ -69740,11 +70643,14 @@ func (s *InstanceStatusSummary) SetStatus(v string) *InstanceStatusSummary { return s } +// Information about the Capacity Reservation usage. type InstanceUsage struct { _ struct{} `type:"structure"` + // The ID of the AWS account that is making use of the Capacity Reservation. AccountId *string `locationName:"accountId" type:"string"` + // The number of instances the AWS account currently has in the Capacity Reservation. UsedInstanceCount *int64 `locationName:"usedInstanceCount" type:"integer"` } @@ -70706,9 +71612,8 @@ type LaunchTemplateCpuOptionsRequest struct { // The number of CPU cores for the instance. CoreCount *int64 `type:"integer"` - // The number of threads per CPU core. To disable Intel Hyper-Threading Technology - // for the instance, specify a value of 1. Otherwise, specify the default value - // of 2. + // The number of threads per CPU core. To disable multithreading for the instance, + // specify a value of 1. Otherwise, specify the default value of 2. ThreadsPerCore *int64 `type:"integer"` } @@ -72369,7 +73274,7 @@ func (s *ModifyCapacityReservationInput) SetInstanceCount(v int64) *ModifyCapaci type ModifyCapacityReservationOutput struct { _ struct{} `type:"structure"` - // Information about the Capacity Reservation. + // Returns true if the request succeeds; otherwise, it returns an error. Return *bool `locationName:"return" type:"boolean"` } @@ -75389,8 +76294,7 @@ type ModifyVpcEndpointInput struct { DryRun *bool `type:"boolean"` // A policy to attach to the endpoint that controls access to the service. The - // policy must be in valid JSON format. If this parameter is not specified, - // we attach a default policy that allows full access to the service. + // policy must be in valid JSON format. PolicyDocument *string `type:"string"` // (Interface endpoint) Indicate whether a private hosted zone is associated @@ -75915,6 +76819,9 @@ func (s *ModifyVpcTenancyOutput) SetReturnValue(v bool) *ModifyVpcTenancyOutput type ModifyVpnConnectionInput struct { _ struct{} `type:"structure"` + // The ID of the customer gateway at your end of the VPN connection. + CustomerGatewayId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -75956,6 +76863,12 @@ func (s *ModifyVpnConnectionInput) Validate() error { return nil } +// SetCustomerGatewayId sets the CustomerGatewayId field's value. +func (s *ModifyVpnConnectionInput) SetCustomerGatewayId(v string) *ModifyVpnConnectionInput { + s.CustomerGatewayId = &v + return s +} + // SetDryRun sets the DryRun field's value. 
func (s *ModifyVpnConnectionInput) SetDryRun(v bool) *ModifyVpnConnectionInput { s.DryRun = &v @@ -76003,6 +76916,93 @@ func (s *ModifyVpnConnectionOutput) SetVpnConnection(v *VpnConnection) *ModifyVp return s } +type ModifyVpnTunnelCertificateInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AWS Site-to-Site VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` + + // The external IP address of the VPN tunnel. + // + // VpnTunnelOutsideIpAddress is a required field + VpnTunnelOutsideIpAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpnTunnelCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpnTunnelCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpnTunnelCertificateInput"} + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + if s.VpnTunnelOutsideIpAddress == nil { + invalidParams.Add(request.NewErrParamRequired("VpnTunnelOutsideIpAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpnTunnelCertificateInput) SetDryRun(v bool) *ModifyVpnTunnelCertificateInput { + s.DryRun = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. +func (s *ModifyVpnTunnelCertificateInput) SetVpnConnectionId(v string) *ModifyVpnTunnelCertificateInput { + s.VpnConnectionId = &v + return s +} + +// SetVpnTunnelOutsideIpAddress sets the VpnTunnelOutsideIpAddress field's value. +func (s *ModifyVpnTunnelCertificateInput) SetVpnTunnelOutsideIpAddress(v string) *ModifyVpnTunnelCertificateInput { + s.VpnTunnelOutsideIpAddress = &v + return s +} + +type ModifyVpnTunnelCertificateOutput struct { + _ struct{} `type:"structure"` + + // Describes a VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpnTunnelCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelCertificateOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. +func (s *ModifyVpnTunnelCertificateOutput) SetVpnConnection(v *VpnConnection) *ModifyVpnTunnelCertificateOutput { + s.VpnConnection = v + return s +} + type MonitorInstancesInput struct { _ struct{} `type:"structure"` @@ -80982,7 +81982,9 @@ type RequestSpotLaunchSpecification struct { // you can specify the names or the IDs of the security groups. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"` - // The ID of the subnet in which to launch the instance. + // The IDs of the subnets in which to launch the instance. 
To specify multiple + // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, + // subnet-0987cdef6example2". SubnetId *string `locationName:"subnetId" type:"string"` // The Base64-encoded user data for the instance. User data is limited to 16 @@ -84640,7 +85642,7 @@ type ScheduledInstancesEbs struct { // The volume type. gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, // Throughput Optimized HDD for st1, Cold HDD for sc1, or standard for Magnetic. // - // Default: standard + // Default: gp2 VolumeType *string `type:"string"` } @@ -85165,7 +86167,7 @@ type SearchTransitGatewayRoutesInput struct { // // * state - The state of the route (active | blackhole). // - // * type - The type of roue (propagated | static). + // * type - The type of route (propagated | static). // // Filters is a required field Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list" required:"true"` @@ -85426,6 +86428,70 @@ func (s *SecurityGroupReference) SetVpcPeeringConnectionId(v string) *SecurityGr return s } +type SendDiagnosticInterruptInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendDiagnosticInterruptInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDiagnosticInterruptInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendDiagnosticInterruptInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendDiagnosticInterruptInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *SendDiagnosticInterruptInput) SetDryRun(v bool) *SendDiagnosticInterruptInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *SendDiagnosticInterruptInput) SetInstanceId(v string) *SendDiagnosticInterruptInput { + s.InstanceId = &v + return s +} + +type SendDiagnosticInterruptOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SendDiagnosticInterruptOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDiagnosticInterruptOutput) GoString() string { + return s.String() +} + // Describes a service configuration for a VPC endpoint service. type ServiceConfiguration struct { _ struct{} `type:"structure"` @@ -86378,7 +87444,7 @@ type SpotFleetLaunchSpecification struct { // Deprecated. AddressingType *string `locationName:"addressingType" type:"string"` - // One or more block devices that are mapped to the Spot instances. You can't + // One or more block devices that are mapped to the Spot Instances. You can't // specify both a snapshot ID and an encryption value. This is because only // blank volumes can be encrypted on creation. 
If a snapshot is the basis for // a volume, it is not blank and its encryption status is used for the volume @@ -86436,8 +87502,9 @@ type SpotFleetLaunchSpecification struct { // by the value of WeightedCapacity. SpotPrice *string `locationName:"spotPrice" type:"string"` - // The ID of the subnet in which to launch the instances. To specify multiple - // subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08". + // The IDs of the subnets in which to launch the instances. To specify multiple + // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, + // subnet-0987cdef6example2". SubnetId *string `locationName:"subnetId" type:"string"` // The tags to apply during creation. @@ -86668,8 +87735,19 @@ func (s *SpotFleetRequestConfig) SetSpotFleetRequestState(v string) *SpotFleetRe type SpotFleetRequestConfigData struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowestPrice. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the Spot Fleet request. + // + // If the allocation strategy is lowestPrice, Spot Fleet launches instances + // from the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, Spot Fleet launches instances + // from all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacityOptimized, Spot Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` // A unique, case-sensitive identifier that you provide to ensure the idempotency @@ -87362,8 +88440,19 @@ func (s *SpotMarketOptions) SetValidUntil(v time.Time) *SpotMarketOptions { type SpotOptions struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowest-price. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the EC2 Fleet. + // + // If the allocation strategy is lowestPrice, EC2 Fleet launches instances from + // the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, EC2 Fleet launches instances from + // all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacityOptimized, EC2 Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"SpotAllocationStrategy"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -87447,8 +88536,19 @@ func (s *SpotOptions) SetSingleInstanceType(v bool) *SpotOptions { type SpotOptionsRequest struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowestPrice. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the EC2 Fleet. 
+ // + // If the allocation strategy is lowestPrice, EC2 Fleet launches instances from + // the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, EC2 Fleet launches instances from + // all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacityOptimized, EC2 Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `type:"string" enum:"SpotAllocationStrategy"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -91222,6 +92322,9 @@ type VgwTelemetry struct { // The number of accepted routes. AcceptedRouteCount *int64 `locationName:"acceptedRouteCount" type:"integer"` + // The Amazon Resource Name (ARN) of the VPN tunnel endpoint certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + // The date and time of the last change in status. LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp"` @@ -91252,6 +92355,12 @@ func (s *VgwTelemetry) SetAcceptedRouteCount(v int64) *VgwTelemetry { return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *VgwTelemetry) SetCertificateArn(v string) *VgwTelemetry { + s.CertificateArn = &v + return s +} + // SetLastStatusChange sets the LastStatusChange field's value. func (s *VgwTelemetry) SetLastStatusChange(v time.Time) *VgwTelemetry { s.LastStatusChange = &v @@ -94333,6 +95442,9 @@ const ( // InstanceTypeI3en24xlarge is a InstanceType enum value InstanceTypeI3en24xlarge = "i3en.24xlarge" + // InstanceTypeI3enMetal is a InstanceType enum value + InstanceTypeI3enMetal = "i3en.metal" + // InstanceTypeHi14xlarge is a InstanceType enum value InstanceTypeHi14xlarge = "hi1.4xlarge" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go index 7b42719d653..7c9fccd7c8f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -38,8 +38,8 @@ func customRetryRule(r *request.Request) time.Duration { count = len(retryTimes) - 1 } - minTime := int(retryTimes[count]) - return time.Duration(sdkrand.SeededRand.Intn(minTime) + minTime) + minTime := int64(retryTimes[count]) + return time.Duration(sdkrand.SeededRand.Int63n(minTime) + minTime) } func setCustomRetryer(c *client.Client) { diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go index e403b84a442..31c314e0e5f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go @@ -9,7 +9,7 @@ // // To learn more, see the following resources: // -// * Amazon EC2: Amazon EC2 product page (http://aws.amazon.com/ec2), Amazon +// * Amazon EC2: AmazonEC2 product page (http://aws.amazon.com/ec2), Amazon // EC2 documentation (http://aws.amazon.com/documentation/ec2) // // * Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 00000000000..d5307fcaa0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import 
"github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 8e473d0fe92..0327865eeec 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -14,9 +14,10 @@ package expfmt import ( - "bytes" + "bufio" "fmt" "io" + "io/ioutil" "math" "strconv" "strings" @@ -27,7 +28,7 @@ import ( dto "github.com/prometheus/client_model/go" ) -// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer +// enhancedWriter has all the enhanced write functions needed here. bufio.Writer // implements it. type enhancedWriter interface { io.Writer @@ -37,14 +38,13 @@ type enhancedWriter interface { } const ( - initialBufSize = 512 initialNumBufSize = 24 ) var ( bufPool = sync.Pool{ New: func() interface{} { - return bytes.NewBuffer(make([]byte, 0, initialBufSize)) + return bufio.NewWriter(ioutil.Discard) }, } numBufPool = sync.Pool{ @@ -75,16 +75,14 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e } // Try the interface upgrade. If it doesn't work, we'll use a - // bytes.Buffer from the sync.Pool and write out its content to out in a - // single go in the end. + // bufio.Writer from the sync.Pool. w, ok := out.(enhancedWriter) if !ok { - b := bufPool.Get().(*bytes.Buffer) - b.Reset() + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) w = b defer func() { - bWritten, bErr := out.Write(b.Bytes()) - written = bWritten + bErr := b.Flush() if err == nil { err = bErr } diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index ec3d86ba7ce..342e5940d0f 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -325,7 +325,7 @@ func (p *TextParser) startLabelValue() stateFn { // - Other labels have to be added to currentLabels for signature calculation. if p.currentMF.GetType() == dto.MetricType_SUMMARY { if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -337,7 +337,7 @@ func (p *TextParser) startLabelValue() stateFn { // Similar special treatment of histograms. if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -392,7 +392,7 @@ func (p *TextParser) readingValue() stateFn { if p.readTokenUntilWhitespace(); p.err != nil { return nil // Unexpected end of input. 
} - value, err := strconv.ParseFloat(p.currentToken.String(), 64) + value, err := parseFloat(p.currentToken.String()) if err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) @@ -755,3 +755,10 @@ func histogramMetricName(name string) string { return name } } + +func parseFloat(s string) (float64, error) { + if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go index 439d76839b8..973be2809ba 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go @@ -196,9 +196,11 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { // nodeAddresses returns the provided node's address, based on the priority: // 1. NodeInternalIP -// 2. NodeExternalIP -// 3. NodeLegacyHostIP -// 3. NodeHostName +// 2. NodeInternalDNS +// 3. NodeExternalIP +// 4. NodeExternalDNS +// 5. NodeLegacyHostIP +// 6. NodeHostName // // Derived from k8s.io/kubernetes/pkg/util/node/node.go func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, error) { @@ -210,9 +212,15 @@ func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, if addresses, ok := m[apiv1.NodeInternalIP]; ok { return addresses[0], m, nil } + if addresses, ok := m[apiv1.NodeInternalDNS]; ok { + return addresses[0], m, nil + } if addresses, ok := m[apiv1.NodeExternalIP]; ok { return addresses[0], m, nil } + if addresses, ok := m[apiv1.NodeExternalDNS]; ok { + return addresses[0], m, nil + } if addresses, ok := m[apiv1.NodeAddressType(NodeLegacyHostIP)]; ok { return addresses[0], m, nil } diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 1dbdecc8d0f..4625e42a317 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -191,6 +191,9 @@ func (m *Manager) ApplyConfig(cfg map[string]sd_config.ServiceDiscoveryConfig) e } } m.cancelDiscoverers() + m.targets = make(map[poolKey]map[string]*targetgroup.Group) + m.providers = nil + m.discoverCancel = nil for name, scfg := range cfg { m.registerProviders(scfg, name) discoveredTargets.WithLabelValues(m.name, name).Set(0) @@ -280,9 +283,6 @@ func (m *Manager) cancelDiscoverers() { for _, c := range m.discoverCancel { c() } - m.targets = make(map[poolKey]map[string]*targetgroup.Group) - m.providers = nil - m.discoverCancel = nil } func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 0df441aec18..640ce4c59d6 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -567,7 +567,7 @@ func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts { func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet { apiLabelSet := models.LabelSet{} for _, label := range modelLabelSet { - apiLabelSet[label.Name] = string(label.Value) + apiLabelSet[label.Name] = label.Value } 
return apiLabelSet @@ -594,7 +594,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b return errors.Errorf("bad response status %s", resp.Status) } - return err + return nil } // Stop shuts down the notification handler. diff --git a/vendor/github.com/prometheus/prometheus/pkg/gate/gate.go b/vendor/github.com/prometheus/prometheus/pkg/gate/gate.go index 3d4fddb75b4..6cb9d583c6c 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/gate/gate.go +++ b/vendor/github.com/prometheus/prometheus/pkg/gate/gate.go @@ -20,7 +20,7 @@ type Gate struct { ch chan struct{} } -// NewGate returns a query gate that limits the number of queries +// New returns a query gate that limits the number of queries // being concurrently executed. func New(length int) *Gate { return &Gate{ diff --git a/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go index b89470d38a6..001d0476fa3 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go @@ -113,7 +113,7 @@ func (r *Rule) Validate() (errs []error) { if r.Expr == "" { errs = append(errs, errors.Errorf("field 'expr' must be set in rule")) } else if _, err := promql.ParseExpr(r.Expr); err != nil { - errs = append(errs, errors.Errorf("could not parse expression: %s", err)) + errs = append(errs, errors.Wrap(err, "could not parse expression")) } if r.Record != "" { if len(r.Annotations) > 0 { @@ -143,8 +143,7 @@ func (r *Rule) Validate() (errs []error) { } } - errs = append(errs, testTemplateParsing(r)...) - return errs + return append(errs, testTemplateParsing(r)...) } // testTemplateParsing checks if the templates used in labels and annotations @@ -176,18 +175,18 @@ func testTemplateParsing(rl *Rule) (errs []error) { } // Parsing Labels. - for _, val := range rl.Labels { + for k, val := range rl.Labels { err := parseTest(val) if err != nil { - errs = append(errs, errors.Errorf("msg=%s", err.Error())) + errs = append(errs, errors.Wrapf(err, "label %q", k)) } } // Parsing Annotations. - for _, val := range rl.Annotations { + for k, val := range rl.Annotations { err := parseTest(val) if err != nil { - errs = append(errs, errors.Errorf("msg=%s", err.Error())) + errs = append(errs, errors.Wrapf(err, "annotation %q", k)) } } @@ -207,7 +206,11 @@ func Parse(content []byte) (*RuleGroups, []error) { func ParseFile(file string) (*RuleGroups, []error) { b, err := ioutil.ReadFile(file) if err != nil { - return nil, []error{err} + return nil, []error{errors.Wrap(err, file)} + } + rgs, errs := Parse(b) + for i := range errs { + errs[i] = errors.Wrap(errs[i], file) } - return Parse(b) + return rgs, errs } diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go index a1b0d6977e8..ed76bc39578 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go @@ -20,7 +20,6 @@ import ( "io" "math" "sort" - "strconv" "strings" "unicode/utf8" @@ -44,7 +43,10 @@ func (l *openMetricsLexer) buf() []byte { } func (l *openMetricsLexer) cur() byte { - return l.b[l.i] + if l.i < len(l.b) { + return l.b[l.i] + } + return byte(' ') } // next advances the openMetricsLexer to the next character. 
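The strconv.ParseFloat-to-parseFloat substitutions running through the expfmt and textparse hunks in this update all exist for the same reason, spelled out by the "Keep to pre-Go 1.13 float formats" comment the diff vendors in: Go 1.13 extended strconv.ParseFloat to accept digit-separating underscores ("1_000") and hexadecimal floats ("0x1p-2"), neither of which is legal in the Prometheus exposition formats. Below is a minimal, self-contained sketch of that guard (the helper name and error message match the vendored diff; the sample inputs in main are illustrative only):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFloat rejects the float syntaxes that strconv.ParseFloat began
// accepting in Go 1.13 (hex floats such as "0x1p-2" and underscores such
// as "1_000") and otherwise defers to strconv.ParseFloat.
func parseFloat(s string) (float64, error) {
	if strings.ContainsAny(s, "pP_") {
		return 0, fmt.Errorf("unsupported character in float")
	}
	return strconv.ParseFloat(s, 64)
}

func main() {
	for _, s := range []string{"0.99", "NaN", "1_000", "0x1p-2"} {
		v, err := parseFloat(s)
		fmt.Printf("%-7s => %v, %v\n", s, v, err)
	}
}
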
@@ -85,7 +87,7 @@ type OpenMetricsParser struct { offsets []int } -// New returns a new parser of the byte slice. +// NewOpenMetricsParser returns a new parser of the byte slice. func NewOpenMetricsParser(b []byte) Parser { return &OpenMetricsParser{l: &openMetricsLexer{b: b}} } @@ -265,7 +267,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { if t2 != tValue { return EntryInvalid, parseError("expected value after metric", t) } - if p.val, err = strconv.ParseFloat(yoloString(p.l.buf()[1:]), 64); err != nil { + if p.val, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { return EntryInvalid, err } // Ensure canonical NaN value. @@ -280,7 +282,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { p.hasTS = true var ts float64 // A float is enough to hold what we need for millisecond resolution. - if ts, err = strconv.ParseFloat(yoloString(p.l.buf()[1:]), 64); err != nil { + if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { return EntryInvalid, err } p.ts = int64(ts * 1000) diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go index f05b87abd3b..69fa51c3ddd 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go @@ -153,7 +153,7 @@ type PromParser struct { offsets []int } -// New returns a new parser of the byte slice. +// NewPromParser returns a new parser of the byte slice. func NewPromParser(b []byte) Parser { return &PromParser{l: &promlexer{b: append(b, '\n')}} } @@ -332,7 +332,7 @@ func (p *PromParser) Next() (Entry, error) { if t2 != tValue { return EntryInvalid, parseError("expected value after metric", t) } - if p.val, err = strconv.ParseFloat(yoloString(p.l.buf()), 64); err != nil { + if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { return EntryInvalid, err } // Ensure canonical NaN value. @@ -409,3 +409,11 @@ var helpReplacer = strings.NewReplacer( func yoloString(b []byte) string { return *((*string)(unsafe.Pointer(&b))) } + +func parseFloat(s string) (float64, error) { + // Keep to pre-Go 1.13 float formats. + if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go index 362d9bf1338..5dcb254f2c7 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go @@ -24,6 +24,43 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +type ReadRequest_ResponseType int32 + +const ( + // Server will return a single ReadResponse message with matched series that includes list of raw samples. + // It's recommended to use streamed response types instead. + // + // Response headers: + // Content-Type: "application/x-protobuf" + // Content-Encoding: "snappy" + ReadRequest_SAMPLES ReadRequest_ResponseType = 0 + // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum. 
+ // + // Response headers: + // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" + // Content-Encoding: "" + ReadRequest_STREAMED_XOR_CHUNKS ReadRequest_ResponseType = 1 +) + +var ReadRequest_ResponseType_name = map[int32]string{ + 0: "SAMPLES", + 1: "STREAMED_XOR_CHUNKS", +} + +var ReadRequest_ResponseType_value = map[string]int32{ + "SAMPLES": 0, + "STREAMED_XOR_CHUNKS": 1, +} + +func (x ReadRequest_ResponseType) String() string { + return proto.EnumName(ReadRequest_ResponseType_name, int32(x)) +} + +func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{1, 0} +} + type WriteRequest struct { Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -71,11 +108,18 @@ func (m *WriteRequest) GetTimeseries() []TimeSeries { return nil } +// ReadRequest represents a remote read request. type ReadRequest struct { - Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` + // accepted_response_types allows negotiating the content type of the response. + // + // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is + // implemented by server, error is returned. + // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used. + AcceptedResponseTypes []ReadRequest_ResponseType `protobuf:"varint,2,rep,packed,name=accepted_response_types,json=acceptedResponseTypes,proto3,enum=prometheus.ReadRequest_ResponseType" json:"accepted_response_types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ReadRequest) Reset() { *m = ReadRequest{} } @@ -118,6 +162,14 @@ func (m *ReadRequest) GetQueries() []*Query { return nil } +func (m *ReadRequest) GetAcceptedResponseTypes() []ReadRequest_ResponseType { + if m != nil { + return m.AcceptedResponseTypes + } + return nil +} + +// ReadResponse is a response when response_type equals SAMPLES. type ReadResponse struct { // In same order as the request's queries. Results []*QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` @@ -285,39 +337,110 @@ func (m *QueryResult) GetTimeseries() []*TimeSeries { return nil } +// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS. +// We strictly stream full series after series, optionally split by time. This means that a single frame can contain +// partition of the single series, but once a new series is started to be streamed it means that no more chunks will +// be sent for previous one. +type ChunkedReadResponse struct { + ChunkedSeries []*ChunkedSeries `protobuf:"bytes,1,rep,name=chunked_series,json=chunkedSeries,proto3" json:"chunked_series,omitempty"` + // query_index represents an index of the query from ReadRequest.queries these chunks relates to. 
+ QueryIndex int64 `protobuf:"varint,2,opt,name=query_index,json=queryIndex,proto3" json:"query_index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChunkedReadResponse) Reset() { *m = ChunkedReadResponse{} } +func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) } +func (*ChunkedReadResponse) ProtoMessage() {} +func (*ChunkedReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{5} +} +func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChunkedReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChunkedReadResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChunkedReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChunkedReadResponse.Merge(m, src) +} +func (m *ChunkedReadResponse) XXX_Size() int { + return m.Size() +} +func (m *ChunkedReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ChunkedReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ChunkedReadResponse proto.InternalMessageInfo + +func (m *ChunkedReadResponse) GetChunkedSeries() []*ChunkedSeries { + if m != nil { + return m.ChunkedSeries + } + return nil +} + +func (m *ChunkedReadResponse) GetQueryIndex() int64 { + if m != nil { + return m.QueryIndex + } + return 0 +} + func init() { + proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value) proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest") proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest") proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse") proto.RegisterType((*Query)(nil), "prometheus.Query") proto.RegisterType((*QueryResult)(nil), "prometheus.QueryResult") + proto.RegisterType((*ChunkedReadResponse)(nil), "prometheus.ChunkedReadResponse") } func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) } var fileDescriptor_eefc82927d57d89b = []byte{ - // 333 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xbf, 0x4a, 0x2b, 0x41, - 0x14, 0xc6, 0xef, 0xdc, 0xfc, 0xbb, 0x9c, 0x0d, 0x97, 0xdc, 0x21, 0x57, 0x97, 0x14, 0x31, 0x6c, - 0xb5, 0x10, 0x89, 0x18, 0xc5, 0x42, 0x6c, 0x0c, 0x08, 0x16, 0x49, 0xe1, 0x18, 0x10, 0x6c, 0xc2, - 0xc6, 0x1c, 0x92, 0x85, 0xcc, 0xce, 0x66, 0xe6, 0x6c, 0x91, 0xd7, 0xb3, 0x4a, 0xe9, 0x13, 0x88, - 0xe4, 0x49, 0x64, 0x67, 0x59, 0x1d, 0xb1, 0xb1, 0x1b, 0xe6, 0xf7, 0xfb, 0x3e, 0xce, 0xe1, 0x40, - 0x53, 0xa3, 0x54, 0x84, 0x83, 0x54, 0x2b, 0x52, 0x1c, 0x52, 0xad, 0x24, 0xd2, 0x0a, 0x33, 0xd3, - 0xf1, 0x68, 0x9b, 0xa2, 0x29, 0x40, 0xa7, 0xbd, 0x54, 0x4b, 0x65, 0x9f, 0x27, 0xf9, 0xab, 0xf8, - 0x0d, 0xc6, 0xd0, 0x7c, 0xd0, 0x31, 0xa1, 0xc0, 0x4d, 0x86, 0x86, 0xf8, 0x15, 0x00, 0xc5, 0x12, - 0x0d, 0xea, 0x18, 0x8d, 0xcf, 0x7a, 0x95, 0xd0, 0x1b, 0x1e, 0x0c, 0x3e, 0x3b, 0x07, 0xd3, 0x58, - 0xe2, 0xbd, 0xa5, 0xa3, 0xea, 0xee, 0xf5, 0xe8, 0x97, 0x70, 0xfc, 0xe0, 0x12, 0x3c, 0x81, 0xd1, - 0xa2, 0x2c, 0xeb, 0x43, 0x63, 0x93, 0xb9, 0x4d, 0xff, 0xdc, 0xa6, 0xbb, 0x0c, 0xf5, 0x56, 0x94, - 0x46, 0x70, 0x0d, 0xcd, 0x22, 0x6b, 0x52, 0x95, 0x18, 0xe4, 0xa7, 0xd0, 0xd0, 0x68, 0xb2, 0x35, - 0x95, 0xe1, 0xc3, 0xef, 0x61, 0xcb, 0x45, 0xe9, 0x05, 0xcf, 0x0c, 0x6a, 0x16, 
0xf0, 0x63, 0xe0, - 0x86, 0x22, 0x4d, 0x33, 0x3b, 0x1c, 0x45, 0x32, 0x9d, 0xc9, 0xbc, 0x87, 0x85, 0x15, 0xd1, 0xb2, - 0x64, 0x5a, 0x82, 0x89, 0xe1, 0x21, 0xb4, 0x30, 0x59, 0x7c, 0x75, 0x7f, 0x5b, 0xf7, 0x2f, 0x26, - 0x0b, 0xd7, 0x3c, 0x87, 0x3f, 0x32, 0xa2, 0xa7, 0x15, 0x6a, 0xe3, 0x57, 0xec, 0x54, 0xbe, 0x3b, - 0xd5, 0x38, 0x9a, 0xe3, 0x7a, 0x52, 0x08, 0xe2, 0xc3, 0xe4, 0x7d, 0xa8, 0xad, 0xe2, 0x84, 0x8c, - 0x5f, 0xed, 0xb1, 0xd0, 0x1b, 0xfe, 0x77, 0x23, 0xf9, 0xce, 0xb7, 0x39, 0x14, 0x85, 0x13, 0xdc, - 0x80, 0xe7, 0x2c, 0xc7, 0x2f, 0x7e, 0x7e, 0x10, 0xf7, 0x14, 0xa3, 0xf6, 0x6e, 0xdf, 0x65, 0x2f, - 0xfb, 0x2e, 0x7b, 0xdb, 0x77, 0xd9, 0x63, 0x3d, 0x0f, 0xa4, 0xf3, 0x79, 0xdd, 0x5e, 0xfd, 0xec, - 0x3d, 0x00, 0x00, 0xff, 0xff, 0x9e, 0xb6, 0x05, 0x1c, 0x34, 0x02, 0x00, 0x00, + // 466 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xcf, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0xbb, 0x4d, 0xdb, 0xa0, 0x71, 0x88, 0xc2, 0xb6, 0x25, 0xa6, 0x87, 0x34, 0xb2, 0x38, + 0x58, 0x2a, 0x0a, 0x22, 0x54, 0x9c, 0x38, 0x90, 0x96, 0x48, 0x45, 0x24, 0xfc, 0x59, 0x07, 0x81, + 0x10, 0x92, 0xe5, 0xd8, 0xa3, 0xc6, 0xa2, 0xfe, 0xd3, 0xdd, 0xb5, 0xd4, 0xbc, 0x1e, 0xa7, 0x9e, + 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0xed, 0xda, 0x0e, 0x1b, 0xb8, 0x70, 0x5b, 0x7f, 0xdf, 0x37, + 0x3f, 0xef, 0x8c, 0xc7, 0xd0, 0xe2, 0x98, 0x64, 0x12, 0x07, 0x39, 0xcf, 0x64, 0x46, 0x21, 0xe7, + 0x59, 0x82, 0x72, 0x81, 0x85, 0x38, 0xb2, 0xe4, 0x32, 0x47, 0x51, 0x1a, 0x47, 0x07, 0x97, 0xd9, + 0x65, 0xa6, 0x8f, 0x8f, 0xd5, 0xa9, 0x54, 0x9d, 0x09, 0xb4, 0x3e, 0xf2, 0x58, 0x22, 0xc3, 0xeb, + 0x02, 0x85, 0xa4, 0xcf, 0x01, 0x64, 0x9c, 0xa0, 0x40, 0x1e, 0xa3, 0xb0, 0x49, 0xbf, 0xe1, 0x5a, + 0xc3, 0xfb, 0x83, 0x3f, 0xcc, 0xc1, 0x2c, 0x4e, 0xd0, 0xd3, 0xee, 0xd9, 0xce, 0xed, 0xcf, 0xe3, + 0x2d, 0x66, 0xe4, 0x9d, 0xef, 0x04, 0x2c, 0x86, 0x41, 0x54, 0xd3, 0x4e, 0xa0, 0x79, 0x5d, 0x98, + 0xa8, 0x7b, 0x26, 0xea, 0x7d, 0x81, 0x7c, 0xc9, 0xea, 0x04, 0xfd, 0x02, 0xdd, 0x20, 0x0c, 0x31, + 0x97, 0x18, 0xf9, 0x1c, 0x45, 0x9e, 0xa5, 0x02, 0x7d, 0xdd, 0x81, 0xbd, 0xdd, 0x6f, 0xb8, 0xed, + 0xe1, 0x43, 0xb3, 0xd8, 0x78, 0xcd, 0x80, 0x55, 0xe9, 0xd9, 0x32, 0x47, 0x76, 0x58, 0x43, 0x4c, + 0x55, 0x38, 0xa7, 0xd0, 0x32, 0x05, 0x6a, 0x41, 0xd3, 0x1b, 0x4d, 0xdf, 0x4d, 0xc6, 0x5e, 0x67, + 0x8b, 0x76, 0x61, 0xdf, 0x9b, 0xb1, 0xf1, 0x68, 0x3a, 0x7e, 0xe9, 0x7f, 0x7a, 0xcb, 0xfc, 0xf3, + 0x8b, 0x0f, 0x6f, 0x5e, 0x7b, 0x1d, 0xe2, 0x8c, 0x54, 0x55, 0xb0, 0x46, 0xd1, 0x27, 0xd0, 0xe4, + 0x28, 0x8a, 0x2b, 0x59, 0x37, 0xd4, 0xfd, 0xb7, 0x21, 0xed, 0xb3, 0x3a, 0xe7, 0x7c, 0x23, 0xb0, + 0xab, 0x0d, 0xfa, 0x08, 0xa8, 0x90, 0x01, 0x97, 0xbe, 0x9e, 0x98, 0x0c, 0x92, 0xdc, 0x4f, 0x14, + 0x87, 0xb8, 0x0d, 0xd6, 0xd1, 0xce, 0xac, 0x36, 0xa6, 0x82, 0xba, 0xd0, 0xc1, 0x34, 0xda, 0xcc, + 0x6e, 0xeb, 0x6c, 0x1b, 0xd3, 0xc8, 0x4c, 0x9e, 0xc2, 0x9d, 0x24, 0x90, 0xe1, 0x02, 0xb9, 0xb0, + 0x1b, 0xfa, 0x56, 0xb6, 0x79, 0xab, 0x49, 0x30, 0xc7, 0xab, 0x69, 0x19, 0x60, 0xeb, 0x24, 0x3d, + 0x81, 0xdd, 0x45, 0x9c, 0x4a, 0x61, 0xef, 0xf4, 0x89, 0x6b, 0x0d, 0x0f, 0xff, 0x1e, 0xee, 0x85, + 0x32, 0x59, 0x99, 0x71, 0xc6, 0x60, 0x19, 0xcd, 0xd1, 0x67, 0xff, 0xbf, 0x25, 0x1b, 0xfb, 0x71, + 0x03, 0xfb, 0xe7, 0x8b, 0x22, 0xfd, 0xaa, 0x3e, 0x8e, 0x31, 0xd5, 0x17, 0xd0, 0x0e, 0x4b, 0xd9, + 0xdf, 0x40, 0x3e, 0x30, 0x91, 0x55, 0x61, 0x45, 0xbd, 0x1b, 0x9a, 0x8f, 0xf4, 0x18, 0x2c, 0xb5, + 0x46, 0x4b, 0x3f, 0x4e, 0x23, 0xbc, 0xa9, 0xe6, 0x04, 0x5a, 0x7a, 0xa5, 0x94, 0xb3, 0x83, 0xdb, + 0x55, 0x8f, 0xfc, 0x58, 0xf5, 0xc8, 0xaf, 0x55, 0x8f, 0x7c, 0xde, 0x53, 
0xdc, 0x7c, 0x3e, 0xdf, + 0xd3, 0x3f, 0xc1, 0xd3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9a, 0xb6, 0x6b, 0xcd, 0x43, 0x03, + 0x00, 0x00, } func (m *WriteRequest) Marshal() (dAtA []byte, err error) { @@ -385,6 +508,24 @@ func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.AcceptedResponseTypes) > 0 { + dAtA2 := make([]byte, len(m.AcceptedResponseTypes)*10) + var j1 int + for _, num := range m.AcceptedResponseTypes { + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintRemote(dAtA, i, uint64(j1)) + i-- + dAtA[i] = 0x12 + } if len(m.Queries) > 0 { for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- { { @@ -547,6 +688,52 @@ func (m *QueryResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ChunkedReadResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChunkedReadResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChunkedReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.QueryIndex != 0 { + i = encodeVarintRemote(dAtA, i, uint64(m.QueryIndex)) + i-- + dAtA[i] = 0x10 + } + if len(m.ChunkedSeries) > 0 { + for iNdEx := len(m.ChunkedSeries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ChunkedSeries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintRemote(dAtA []byte, offset int, v uint64) int { offset -= sovRemote(v) base := offset @@ -588,6 +775,13 @@ func (m *ReadRequest) Size() (n int) { n += 1 + l + sovRemote(uint64(l)) } } + if len(m.AcceptedResponseTypes) > 0 { + l = 0 + for _, e := range m.AcceptedResponseTypes { + l += sovRemote(uint64(e)) + } + n += 1 + sovRemote(uint64(l)) + l + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -658,6 +852,27 @@ func (m *QueryResult) Size() (n int) { return n } +func (m *ChunkedReadResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChunkedSeries) > 0 { + for _, e := range m.ChunkedSeries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if m.QueryIndex != 0 { + n += 1 + sovRemote(uint64(m.QueryIndex)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovRemote(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -815,6 +1030,75 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType == 0 { + var v ReadRequest_ResponseType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= ReadRequest_ResponseType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.AcceptedResponseTypes) == 0 { + m.AcceptedResponseTypes = make([]ReadRequest_ResponseType, 0, elementCount) + } + for iNdEx < postIndex { + var v ReadRequest_ResponseType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= ReadRequest_ResponseType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedResponseTypes", wireType) + } default: iNdEx = preIndex skippy, err := skipRemote(dAtA[iNdEx:]) @@ -1178,6 +1462,113 @@ func (m *QueryResult) Unmarshal(dAtA []byte) error { } return nil } +func (m *ChunkedReadResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChunkedReadResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChunkedReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkedSeries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChunkedSeries = append(m.ChunkedSeries, &ChunkedSeries{}) + if err := m.ChunkedSeries[len(m.ChunkedSeries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryIndex", wireType) + } + m.QueryIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueryIndex |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipRemote(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.proto b/vendor/github.com/prometheus/prometheus/prompb/remote.proto index cf86f0dd5a1..da2b06f2977 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/remote.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/remote.proto @@ -23,10 +23,36 @@ message WriteRequest { repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; } +// ReadRequest represents a remote read request. message ReadRequest { repeated Query queries = 1; + + enum ResponseType { + // Server will return a single ReadResponse message with matched series that includes list of raw samples. + // It's recommended to use streamed response types instead. + // + // Response headers: + // Content-Type: "application/x-protobuf" + // Content-Encoding: "snappy" + SAMPLES = 0; + // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum. + // + // Response headers: + // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" + // Content-Encoding: "" + STREAMED_XOR_CHUNKS = 1; + } + + // accepted_response_types allows negotiating the content type of the response. + // + // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is + // implemented by server, error is returned. + // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used. + repeated ResponseType accepted_response_types = 2; } +// ReadResponse is a response when response_type equals SAMPLES. message ReadResponse { // In same order as the request's queries. repeated QueryResult results = 1; @@ -43,3 +69,14 @@ message QueryResult { // Samples within a time series must be ordered by time. repeated prometheus.TimeSeries timeseries = 1; } + +// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS. +// We strictly stream full series after series, optionally split by time. This means that a single frame can contain +// partition of the single series, but once a new series is started to be streamed it means that no more chunks will +// be sent for previous one. +message ChunkedReadResponse { + repeated prometheus.ChunkedSeries chunked_series = 1; + + // query_index represents an index of the query from ReadRequest.queries these chunks relates to. + int64 query_index = 2; +} diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go index be2331051c0..ea3df4dad8e 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go @@ -56,6 +56,32 @@ func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor_d938547f84707355, []int{4, 0} } +// We require this to match chunkenc.Encoding. 
+type Chunk_Encoding int32 + +const ( + Chunk_UNKNOWN Chunk_Encoding = 0 + Chunk_XOR Chunk_Encoding = 1 +) + +var Chunk_Encoding_name = map[int32]string{ + 0: "UNKNOWN", + 1: "XOR", +} + +var Chunk_Encoding_value = map[string]int32{ + "UNKNOWN": 0, + "XOR": 1, +} + +func (x Chunk_Encoding) String() string { + return proto.EnumName(Chunk_Encoding_name, int32(x)) +} + +func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{6, 0} +} + type Sample struct { Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` @@ -111,6 +137,7 @@ func (m *Sample) GetTimestamp() int64 { return 0 } +// TimeSeries represents samples and labels for a single time series. type TimeSeries struct { Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` @@ -403,44 +430,185 @@ func (m *ReadHints) GetEndMs() int64 { return 0 } +// Chunk represents a TSDB chunk. +// Time range [min, max] is inclusive. +type Chunk struct { + MinTimeMs int64 `protobuf:"varint,1,opt,name=min_time_ms,json=minTimeMs,proto3" json:"min_time_ms,omitempty"` + MaxTimeMs int64 `protobuf:"varint,2,opt,name=max_time_ms,json=maxTimeMs,proto3" json:"max_time_ms,omitempty"` + Type Chunk_Encoding `protobuf:"varint,3,opt,name=type,proto3,enum=prometheus.Chunk_Encoding" json:"type,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Chunk) Reset() { *m = Chunk{} } +func (m *Chunk) String() string { return proto.CompactTextString(m) } +func (*Chunk) ProtoMessage() {} +func (*Chunk) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{6} +} +func (m *Chunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Chunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_Chunk.Merge(m, src) +} +func (m *Chunk) XXX_Size() int { + return m.Size() +} +func (m *Chunk) XXX_DiscardUnknown() { + xxx_messageInfo_Chunk.DiscardUnknown(m) +} + +var xxx_messageInfo_Chunk proto.InternalMessageInfo + +func (m *Chunk) GetMinTimeMs() int64 { + if m != nil { + return m.MinTimeMs + } + return 0 +} + +func (m *Chunk) GetMaxTimeMs() int64 { + if m != nil { + return m.MaxTimeMs + } + return 0 +} + +func (m *Chunk) GetType() Chunk_Encoding { + if m != nil { + return m.Type + } + return Chunk_UNKNOWN +} + +func (m *Chunk) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// ChunkedSeries represents single, encoded time series. +type ChunkedSeries struct { + // Labels should be sorted. + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + // Chunks will be in start time order and may overlap. 
+ Chunks []Chunk `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} } +func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } +func (*ChunkedSeries) ProtoMessage() {} +func (*ChunkedSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{7} +} +func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChunkedSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChunkedSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChunkedSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChunkedSeries.Merge(m, src) +} +func (m *ChunkedSeries) XXX_Size() int { + return m.Size() +} +func (m *ChunkedSeries) XXX_DiscardUnknown() { + xxx_messageInfo_ChunkedSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_ChunkedSeries proto.InternalMessageInfo + +func (m *ChunkedSeries) GetLabels() []Label { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ChunkedSeries) GetChunks() []Chunk { + if m != nil { + return m.Chunks + } + return nil +} + func init() { proto.RegisterEnum("prometheus.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) + proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) proto.RegisterType((*Sample)(nil), "prometheus.Sample") proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries") proto.RegisterType((*Label)(nil), "prometheus.Label") proto.RegisterType((*Labels)(nil), "prometheus.Labels") proto.RegisterType((*LabelMatcher)(nil), "prometheus.LabelMatcher") proto.RegisterType((*ReadHints)(nil), "prometheus.ReadHints") + proto.RegisterType((*Chunk)(nil), "prometheus.Chunk") + proto.RegisterType((*ChunkedSeries)(nil), "prometheus.ChunkedSeries") } func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 379 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x4f, 0xef, 0xd2, 0x40, - 0x14, 0x64, 0xdb, 0xb2, 0x95, 0x87, 0x31, 0x75, 0x83, 0xb1, 0x1a, 0x45, 0xd2, 0x53, 0x4f, 0x25, - 0xe0, 0xc9, 0xc4, 0x13, 0x49, 0x13, 0x0f, 0xd4, 0x84, 0x85, 0x93, 0x17, 0xb3, 0xc0, 0x13, 0x6a, - 0xfa, 0x67, 0xed, 0x2e, 0x26, 0x7c, 0x10, 0xbf, 0x13, 0x47, 0x3f, 0x81, 0x31, 0x7c, 0x12, 0xb3, - 0x5b, 0x10, 0x12, 0xbd, 0xfc, 0x6e, 0x6f, 0xe6, 0xcd, 0x74, 0xe6, 0x35, 0x0b, 0x7d, 0x7d, 0x94, - 0xa8, 0x12, 0xd9, 0xd4, 0xba, 0x66, 0x20, 0x9b, 0xba, 0x44, 0xbd, 0xc7, 0x83, 0x7a, 0x39, 0xd8, - 0xd5, 0xbb, 0xda, 0xd2, 0x63, 0x33, 0xb5, 0x8a, 0xe8, 0x3d, 0xd0, 0xa5, 0x28, 0x65, 0x81, 0x6c, - 0x00, 0xdd, 0xef, 0xa2, 0x38, 0x60, 0x48, 0x46, 0x24, 0x26, 0xbc, 0x05, 0xec, 0x15, 0xf4, 0x74, - 0x5e, 0xa2, 0xd2, 0xa2, 0x94, 0xa1, 0x33, 0x22, 0xb1, 0xcb, 0x6f, 0x44, 0xf4, 0x0d, 0x60, 0x95, - 0x97, 0xb8, 0xc4, 0x26, 0x47, 0xc5, 0xc6, 0x40, 0x0b, 0xb1, 0xc6, 0x42, 0x85, 0x64, 0xe4, 0xc6, - 0xfd, 0xe9, 0xd3, 0xe4, 0x16, 0x9f, 0xcc, 0xcd, 0x66, 0xe6, 0x9d, 0x7e, 0xbd, 0xe9, 0xf0, 0x8b, - 0x8c, 0x4d, 0xc1, 0x57, 0x36, 0x5c, 0x85, 0x8e, 0x75, 0xb0, 0x7b, 0x47, 0xdb, 0xeb, 0x62, 0xb9, - 0x0a, 0xa3, 0x09, 0x74, 0xed, 0xa7, 0x18, 0x03, 0xaf, 0x12, 0x65, 0x5b, 0xb7, 0xc7, 0xed, 
0x7c, - 0xbb, 0xc1, 0xb1, 0x64, 0x0b, 0xa2, 0x77, 0x40, 0xe7, 0x6d, 0xe0, 0x43, 0x1b, 0x46, 0x3f, 0x08, - 0x3c, 0xb6, 0x7c, 0x26, 0xf4, 0x66, 0x8f, 0x0d, 0x9b, 0x80, 0x67, 0x7e, 0xb0, 0x4d, 0x7d, 0x32, - 0x7d, 0xfd, 0x8f, 0xff, 0xa2, 0x4b, 0x56, 0x47, 0x89, 0xdc, 0x4a, 0xff, 0x16, 0x75, 0xfe, 0x57, - 0xd4, 0xbd, 0x2f, 0x1a, 0x83, 0x67, 0x7c, 0x8c, 0x82, 0x93, 0x2e, 0x82, 0x0e, 0xf3, 0xc1, 0xfd, - 0x98, 0x2e, 0x02, 0x62, 0x08, 0x9e, 0x06, 0x8e, 0x25, 0x78, 0x1a, 0xb8, 0xd1, 0x57, 0xe8, 0x71, - 0x14, 0xdb, 0x0f, 0x79, 0xa5, 0x15, 0x7b, 0x0e, 0xbe, 0xd2, 0x28, 0x3f, 0x97, 0xca, 0xd6, 0x72, - 0x39, 0x35, 0x30, 0x53, 0x26, 0xf9, 0xcb, 0xa1, 0xda, 0x5c, 0x93, 0xcd, 0xcc, 0x5e, 0xc0, 0x23, - 0xa5, 0x45, 0xa3, 0x8d, 0xda, 0xb5, 0x6a, 0xdf, 0xe2, 0x4c, 0xb1, 0x67, 0x40, 0xb1, 0xda, 0x9a, - 0x85, 0x67, 0x17, 0x5d, 0xac, 0xb6, 0x99, 0x9a, 0x0d, 0x4e, 0xe7, 0x21, 0xf9, 0x79, 0x1e, 0x92, - 0xdf, 0xe7, 0x21, 0xf9, 0x44, 0xcd, 0xc5, 0x72, 0xbd, 0xa6, 0xf6, 0xfd, 0xbc, 0xfd, 0x13, 0x00, - 0x00, 0xff, 0xff, 0xe3, 0x8a, 0x88, 0x84, 0x70, 0x02, 0x00, 0x00, + // 496 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xed, 0xd8, 0x89, 0xd3, 0xdc, 0xf4, 0xfb, 0xe4, 0x8e, 0x82, 0x08, 0x15, 0x84, 0xc8, 0xab, + 0xac, 0x1c, 0x35, 0xac, 0x90, 0x58, 0x15, 0x59, 0x42, 0xa2, 0x4e, 0xd5, 0x69, 0x11, 0x88, 0x4d, + 0x35, 0x89, 0x87, 0xc4, 0x90, 0x19, 0xbb, 0x9e, 0x09, 0x6a, 0x1f, 0x84, 0xc7, 0xe0, 0x3d, 0xba, + 0xe4, 0x09, 0x10, 0xca, 0x93, 0xa0, 0xb9, 0xb6, 0xeb, 0x48, 0x65, 0x03, 0xbb, 0xfb, 0x73, 0xce, + 0x9c, 0x93, 0x93, 0x6b, 0xe8, 0x99, 0xdb, 0x5c, 0xe8, 0x30, 0x2f, 0x32, 0x93, 0x51, 0xc8, 0x8b, + 0x4c, 0x0a, 0xb3, 0x12, 0x1b, 0x7d, 0xd4, 0x5f, 0x66, 0xcb, 0x0c, 0xc7, 0x13, 0x5b, 0x95, 0x88, + 0xe0, 0x15, 0x78, 0x17, 0x5c, 0xe6, 0x6b, 0x41, 0xfb, 0xd0, 0xfe, 0xca, 0xd7, 0x1b, 0x31, 0x20, + 0x23, 0x32, 0x26, 0xac, 0x6c, 0xe8, 0x53, 0xe8, 0x9a, 0x54, 0x0a, 0x6d, 0xb8, 0xcc, 0x07, 0xce, + 0x88, 0x8c, 0x5d, 0xd6, 0x0c, 0x82, 0x6b, 0x80, 0xcb, 0x54, 0x8a, 0x0b, 0x51, 0xa4, 0x42, 0xd3, + 0x09, 0x78, 0x6b, 0x3e, 0x17, 0x6b, 0x3d, 0x20, 0x23, 0x77, 0xdc, 0x9b, 0x1e, 0x86, 0x8d, 0x7c, + 0x78, 0x6a, 0x37, 0x27, 0xad, 0xbb, 0x9f, 0xcf, 0xf7, 0x58, 0x05, 0xa3, 0x53, 0xe8, 0x68, 0x14, + 0xd7, 0x03, 0x07, 0x19, 0x74, 0x97, 0x51, 0xfa, 0xaa, 0x28, 0x35, 0x30, 0x38, 0x86, 0x36, 0x3e, + 0x45, 0x29, 0xb4, 0x14, 0x97, 0xa5, 0xdd, 0x2e, 0xc3, 0xba, 0xf9, 0x0d, 0x0e, 0x0e, 0xcb, 0x26, + 0x78, 0x09, 0xde, 0x69, 0x29, 0xf8, 0xb7, 0x0e, 0x83, 0x6f, 0x04, 0x0e, 0x70, 0x1e, 0x73, 0xb3, + 0x58, 0x89, 0x82, 0x1e, 0x43, 0xcb, 0x06, 0x8c, 0xaa, 0xff, 0x4f, 0x9f, 0x3d, 0xe0, 0x57, 0xb8, + 0xf0, 0xf2, 0x36, 0x17, 0x0c, 0xa1, 0xf7, 0x46, 0x9d, 0x3f, 0x19, 0x75, 0x77, 0x8d, 0x8e, 0xa1, + 0x65, 0x79, 0xd4, 0x03, 0x27, 0x3a, 0xf7, 0xf7, 0x68, 0x07, 0xdc, 0x59, 0x74, 0xee, 0x13, 0x3b, + 0x60, 0x91, 0xef, 0xe0, 0x80, 0x45, 0xbe, 0x1b, 0x7c, 0x86, 0x2e, 0x13, 0x3c, 0x79, 0x93, 0x2a, + 0xa3, 0xe9, 0x63, 0xe8, 0x68, 0x23, 0xf2, 0x2b, 0xa9, 0xd1, 0x96, 0xcb, 0x3c, 0xdb, 0xc6, 0xda, + 0x2a, 0x7f, 0xda, 0xa8, 0x45, 0xad, 0x6c, 0x6b, 0xfa, 0x04, 0xf6, 0xb5, 0xe1, 0x85, 0xb1, 0x68, + 0x17, 0xd1, 0x1d, 0xec, 0x63, 0x4d, 0x1f, 0x81, 0x27, 0x54, 0x62, 0x17, 0x2d, 0x5c, 0xb4, 0x85, + 0x4a, 0x62, 0x1d, 0x7c, 0x27, 0xd0, 0x7e, 0xbd, 0xda, 0xa8, 0x2f, 0x74, 0x08, 0x3d, 0x99, 0xaa, + 0x2b, 0xfb, 0xff, 0x37, 0x62, 0x5d, 0x99, 0x2a, 0x7b, 0x04, 0xb1, 0xc6, 0x3d, 0xbf, 0xb9, 0xdf, + 0x57, 0xe7, 0x22, 0xf9, 0x4d, 0xb5, 0x0f, 0xab, 0xf0, 0x5c, 0x0c, 0xef, 0x68, 0x37, 0x3c, 0x14, + 
0x08, 0x23, 0xb5, 0xc8, 0x92, 0x54, 0x2d, 0x9b, 0xe4, 0x12, 0x6e, 0x38, 0xda, 0x39, 0x60, 0x58, + 0x07, 0x23, 0xd8, 0xaf, 0x51, 0xb4, 0x07, 0x9d, 0x77, 0xb3, 0xb7, 0xb3, 0xb3, 0xf7, 0xb3, 0x32, + 0xac, 0x0f, 0x67, 0xcc, 0x27, 0xc1, 0x35, 0xfc, 0x87, 0xaf, 0x89, 0xe4, 0x5f, 0xef, 0x72, 0x02, + 0xde, 0xc2, 0xbe, 0x50, 0x9f, 0xe5, 0xe1, 0x03, 0xa7, 0x35, 0xa1, 0x84, 0x9d, 0xf4, 0xef, 0xb6, + 0x43, 0xf2, 0x63, 0x3b, 0x24, 0xbf, 0xb6, 0x43, 0xf2, 0xd1, 0xb3, 0xe8, 0x7c, 0x3e, 0xf7, 0xf0, + 0x13, 0x7b, 0xf1, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x9b, 0x28, 0x1b, 0x93, 0x03, 0x00, 0x00, } func (m *Sample) Marshal() (dAtA []byte, err error) { @@ -713,6 +881,110 @@ func (m *ReadHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Chunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x22 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if m.MaxTimeMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MaxTimeMs)) + i-- + dAtA[i] = 0x10 + } + if m.MinTimeMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MinTimeMs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ChunkedSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChunkedSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChunkedSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { offset -= sovTypes(v) base := offset @@ -852,6 +1124,55 @@ func (m *ReadHints) Size() (n int) { return n } +func (m *Chunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinTimeMs != 0 { + n += 1 + sovTypes(uint64(m.MinTimeMs)) + } + if m.MaxTimeMs != 0 { + n += 1 + sovTypes(uint64(m.MaxTimeMs)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func (m *ChunkedSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovTypes(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1550,6 +1871,273 @@ func (m *ReadHints) Unmarshal(dAtA []byte) error { } return nil } +func (m *Chunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Chunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTimeMs", wireType) + } + m.MinTimeMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTimeMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTimeMs", wireType) + } + m.MaxTimeMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTimeMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Chunk_Encoding(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChunkedSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChunkedSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChunkedSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, Chunk{}) + if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipTypes(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.proto b/vendor/github.com/prometheus/prometheus/prompb/types.proto index 6fc84c40587..04671bcf1ff 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/types.proto @@ -23,6 +23,7 @@ message Sample { int64 timestamp = 2; } +// TimeSeries represents samples and labels for a single time series. message TimeSeries { repeated Label labels = 1 [(gogoproto.nullable) = false]; repeated Sample samples = 2 [(gogoproto.nullable) = false]; @@ -56,3 +57,26 @@ message ReadHints { int64 start_ms = 3; // Start time in milliseconds. int64 end_ms = 4; // End time in milliseconds. } + +// Chunk represents a TSDB chunk. +// Time range [min, max] is inclusive. 
+message Chunk { + int64 min_time_ms = 1; + int64 max_time_ms = 2; + + // We require this to match chunkenc.Encoding. + enum Encoding { + UNKNOWN = 0; + XOR = 1; + } + Encoding type = 3; + bytes data = 4; +} + +// ChunkedSeries represents a single, encoded time series. +message ChunkedSeries { + // Labels should be sorted. + repeated Label labels = 1 [(gogoproto.nullable) = false]; + // Chunks will be in start time order and may overlap. + repeated Chunk chunks = 2 [(gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/prometheus/prometheus/promql/query_logger.go b/vendor/github.com/prometheus/prometheus/promql/query_logger.go index 0687818a2fe..256c3d775c4 100644 --- a/vendor/github.com/prometheus/prometheus/promql/query_logger.go +++ b/vendor/github.com/prometheus/prometheus/promql/query_logger.go @@ -15,14 +15,15 @@ package promql import ( "encoding/json" - "github.com/edsrzf/mmap-go" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "os" "path/filepath" "strings" "time" "unicode/utf8" + + "github.com/edsrzf/mmap-go" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" ) type ActiveQueryTracker struct { @@ -75,27 +76,27 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { } } -func getMMapedFile(filename string, filesize int, logger log.Logger) (error, []byte) { +func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) { file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644) if err != nil { level.Error(logger).Log("msg", "Error opening query log file", "file", filename, "err", err) - return err, []byte{} + return nil, err } err = file.Truncate(int64(filesize)) if err != nil { level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err) - return err, []byte{} + return nil, err } fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0) if err != nil { level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) - return err, []byte{} + return nil, err } - return err, fileAsBytes + return fileAsBytes, err } func NewActiveQueryTracker(localStoragePath string, maxQueries int, logger log.Logger) *ActiveQueryTracker { @@ -107,7 +108,7 @@ func NewActiveQueryTracker(localStoragePath string, maxQueries int, logger log.L filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxQueries*entrySize logUnfinishedQueries(filename, filesize, logger) - err, fileAsBytes := getMMapedFile(filename, filesize, logger) + fileAsBytes, err := getMMapedFile(filename, filesize, logger) if err != nil { panic("Unable to create mmap-ed active query log") } @@ -130,7 +131,7 @@ func trimStringByBytes(str string, size int) string { trimIndex := len(bytesStr) if size < len(bytesStr) { for !utf8.RuneStart(bytesStr[size]) { - size -= 1 + size-- } trimIndex = size } diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index 563940d4963..05ce3798ed3 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -15,18 +15,17 @@ package rules import ( "context" - "errors" + html_template "html/template" "math" "net/url" "sort" "sync" "time" - html_template "html/template" - "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" opentracing "github.com/opentracing/opentracing-go" +
"github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -898,7 +897,7 @@ func (m *Manager) LoadGroups( for _, r := range rg.Rules { expr, err := promql.ParseExpr(r.Expr) if err != nil { - return nil, []error{err} + return nil, []error{errors.Wrap(err, fn)} } if r.Alert != "" { diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 07fa6d091b3..b85309cd0d6 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -601,8 +601,8 @@ type scrapeLoop struct { sampleMutator labelsMutator reportSampleMutator labelsMutator + parentCtx context.Context ctx context.Context - scrapeCtx context.Context cancel func() stopped chan struct{} } @@ -857,10 +857,10 @@ func newScrapeLoop(ctx context.Context, stopped: make(chan struct{}), jitterSeed: jitterSeed, l: l, - ctx: ctx, + parentCtx: ctx, honorTimestamps: honorTimestamps, } - sl.scrapeCtx, sl.cancel = context.WithCancel(ctx) + sl.ctx, sl.cancel = context.WithCancel(ctx) return sl } @@ -869,7 +869,7 @@ func (sl *scrapeLoop) run(interval, timeout time.Duration, errc chan<- error) { select { case <-time.After(sl.scraper.offset(interval, sl.jitterSeed)): // Continue after a scraping offset. - case <-sl.scrapeCtx.Done(): + case <-sl.ctx.Done(): close(sl.stopped) return } @@ -882,10 +882,10 @@ func (sl *scrapeLoop) run(interval, timeout time.Duration, errc chan<- error) { mainLoop: for { select { - case <-sl.ctx.Done(): + case <-sl.parentCtx.Done(): close(sl.stopped) return - case <-sl.scrapeCtx.Done(): + case <-sl.ctx.Done(): break mainLoop default: } @@ -947,10 +947,10 @@ mainLoop: last = start select { - case <-sl.ctx.Done(): + case <-sl.parentCtx.Done(): close(sl.stopped) return - case <-sl.scrapeCtx.Done(): + case <-sl.ctx.Done(): break mainLoop case <-ticker.C: } @@ -977,7 +977,7 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int // Wait for when the next scrape would have been, record its timestamp. var staleTime time.Time select { - case <-sl.ctx.Done(): + case <-sl.parentCtx.Done(): return case <-ticker.C: staleTime = time.Now() @@ -986,14 +986,14 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int // Wait for when the next scrape would have been, if the target was recreated // samples should have been ingested by now. select { - case <-sl.ctx.Done(): + case <-sl.parentCtx.Done(): return case <-ticker.C: } // Wait for an extra 10% of the interval, just to be safe. select { - case <-sl.ctx.Done(): + case <-sl.parentCtx.Done(): return case <-time.After(interval / 10): } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/chunked.go b/vendor/github.com/prometheus/prometheus/storage/remote/chunked.go new file mode 100644 index 00000000000..baee2350a1e --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/chunked.go @@ -0,0 +1,154 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package remote + +import ( + "bufio" + "encoding/binary" + "hash" + "hash/crc32" + "io" + "net/http" + + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" +) + +// DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame the client allows. +// 50MB is the default. This is equivalent to ~100k full XOR chunks with an average labelset. +const DefaultChunkedReadLimit = 5e+7 + +// The table gets initialized with sync.Once but may still cause a race +// with any other use of the crc32 package anywhere. Thus we initialize it +// beforehand. +var castagnoliTable *crc32.Table + +func init() { + castagnoliTable = crc32.MakeTable(crc32.Castagnoli) +} + +// ChunkedWriter is an io.Writer wrapper that allows streaming by adding a uvarint delimiter before each write, encoding +// the length of the corresponding byte array. +type ChunkedWriter struct { + writer io.Writer + flusher http.Flusher + + crc32 hash.Hash32 +} + +// NewChunkedWriter constructs a ChunkedWriter. +func NewChunkedWriter(w io.Writer, f http.Flusher) *ChunkedWriter { + return &ChunkedWriter{writer: w, flusher: f, crc32: crc32.New(castagnoliTable)} +} + +// Write writes given bytes to the stream and flushes it. +// Each frame includes: +// +// 1. uvarint for the size of the data frame. +// 2. big-endian uint32 for the Castagnoli polynomial CRC-32 checksum of the data frame. +// 3. the bytes of the given data. +// +// Write returns the number of sent bytes for a given buffer. The number does not include the delimiter and checksum bytes. +func (w *ChunkedWriter) Write(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + + var buf [binary.MaxVarintLen64]byte + v := binary.PutUvarint(buf[:], uint64(len(b))) + if _, err := w.writer.Write(buf[:v]); err != nil { + return 0, err + } + + w.crc32.Reset() + if _, err := w.crc32.Write(b); err != nil { + return 0, err + } + + if err := binary.Write(w.writer, binary.BigEndian, w.crc32.Sum32()); err != nil { + return 0, err + } + + n, err := w.writer.Write(b) + if err != nil { + return n, err + } + + w.flusher.Flush() + return n, nil +} + +// ChunkedReader is a buffered reader that expects a uvarint delimiter and checksum before each message. +// It will allocate as much as the biggest frame defined by delimiter (on top of bufio.Reader allocations). +type ChunkedReader struct { + b *bufio.Reader + data []byte + sizeLimit uint64 + + crc32 hash.Hash32 +} + +// NewChunkedReader constructs a ChunkedReader. +// It allows passing a data slice for byte slice reuse, which will be grown to the needed size if too small. +func NewChunkedReader(r io.Reader, sizeLimit uint64, data []byte) *ChunkedReader { + return &ChunkedReader{b: bufio.NewReader(r), sizeLimit: sizeLimit, data: data, crc32: crc32.New(castagnoliTable)} +} + +// Next returns the next length-delimited record from the input, or io.EOF if +// there are no more records available. Returns io.ErrUnexpectedEOF if a short +// record is found, with a length of n but fewer than n bytes of data. +// Next also verifies the given checksum with the Castagnoli polynomial CRC-32 checksum.
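+// A frame is read back exactly as ChunkedWriter.Write lays it out: a uvarint payload size, a
+// 4-byte big-endian CRC-32 (Castagnoli) of the payload, then the payload bytes themselves.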
+// +// NOTE: The slice returned is valid only until a subsequent call to Next. It's a caller's responsibility to copy the +// returned slice if needed. +func (r *ChunkedReader) Next() ([]byte, error) { + size, err := binary.ReadUvarint(r.b) + if err != nil { + return nil, err + } + + if size > r.sizeLimit { + return nil, errors.Errorf("chunkedReader: message size exceeded the limit %v bytes; got: %v bytes", r.sizeLimit, size) + } + + if cap(r.data) < int(size) { + r.data = make([]byte, size) + } else { + r.data = r.data[:size] + } + + var crc32 uint32 + if err := binary.Read(r.b, binary.BigEndian, &crc32); err != nil { + return nil, err + } + + r.crc32.Reset() + if _, err := io.ReadFull(io.TeeReader(r.b, r.crc32), r.data); err != nil { + return nil, err + } + + if r.crc32.Sum32() != crc32 { + return nil, errors.New("chunkedReader: corrupted frame; checksum mismatch") + } + return r.data, nil +} + +// NextProto consumes the next available record by calling r.Next, and decodes +// it into the protobuf with proto.Unmarshal. +func (r *ChunkedReader) NextProto(pb proto.Message) error { + rec, err := r.Next() + if err != nil { + return err + } + return proto.Unmarshal(rec, pb) +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go index 85112ba9cdb..a7c099167cc 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -21,6 +21,7 @@ import ( "io" "io/ioutil" "net/http" + "strings" "time" "github.com/gogo/protobuf/proto" @@ -155,13 +156,14 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe io.Copy(ioutil.Discard, httpResp.Body) httpResp.Body.Close() }() - if httpResp.StatusCode/100 != 2 { - return nil, errors.Errorf("server returned HTTP status %s", httpResp.Status) - } compressed, err = ioutil.ReadAll(httpResp.Body) if err != nil { - return nil, errors.Wrap(err, "error reading response") + return nil, errors.Wrap(err, fmt.Sprintf("error reading response. HTTP status code: %s", httpResp.Status)) + } + + if httpResp.StatusCode/100 != 2 { + return nil, errors.Errorf("remote server %s returned HTTP status %s: %s", c.url.String(), httpResp.Status, strings.TrimSpace(string(compressed))) } uncompressed, err := snappy.Decode(nil, compressed) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index dc4ee280979..19f0379342e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -24,10 +24,10 @@ import ( "github.com/golang/snappy" "github.com/pkg/errors" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" ) // decodeReadLimit is the maximum size of a read request body in bytes. 
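For context on the new framing above: a minimal sketch of how ChunkedWriter and ChunkedReader from the added chunked.go round-trip frames outside of an HTTP handler. The noopFlusher helper and the payload strings are illustrative assumptions for this sketch, not part of this diff; in the real server the http.ResponseWriter supplies the Flusher.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/storage/remote"
)

// noopFlusher satisfies http.Flusher for a plain in-memory buffer.
type noopFlusher struct{}

func (noopFlusher) Flush() {}

func main() {
	var buf bytes.Buffer
	w := remote.NewChunkedWriter(&buf, noopFlusher{})

	// Each Write emits one frame: uvarint size, big-endian CRC-32 (Castagnoli), payload.
	for _, msg := range []string{"frame-1", "frame-2"} {
		if _, err := w.Write([]byte(msg)); err != nil {
			panic(err)
		}
	}

	// Read the frames back; Next returns io.EOF once the stream is exhausted
	// and errors out on any checksum mismatch or over-limit frame.
	r := remote.NewChunkedReader(&buf, remote.DefaultChunkedReadLimit, nil)
	for {
		rec, err := r.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(string(rec)) // valid only until the next call to Next
	}
}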
@@ -73,9 +73,6 @@ func EncodeReadResponse(resp *prompb.ReadResponse, w http.ResponseWriter) error return err } - w.Header().Set("Content-Type", "application/x-protobuf") - w.Header().Set("Content-Encoding", "snappy") - compressed := snappy.Encode(nil, data) _, err = w.Write(compressed) return err @@ -106,25 +103,6 @@ func ToQuery(from, to int64, matchers []*labels.Matcher, p *storage.SelectParams }, nil } -// FromQuery unpacks a Query proto. -func FromQuery(req *prompb.Query) (int64, int64, []*labels.Matcher, *storage.SelectParams, error) { - matchers, err := fromLabelMatchers(req.Matchers) - if err != nil { - return 0, 0, nil, nil, err - } - var selectParams *storage.SelectParams - if req.Hints != nil { - selectParams = &storage.SelectParams{ - Start: req.Hints.StartMs, - End: req.Hints.EndMs, - Step: req.Hints.StepMs, - Func: req.Hints.Func, - } - } - - return req.StartTimestampMs, req.EndTimestampMs, matchers, selectParams, nil -} - // ToQueryResult builds a QueryResult proto. func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, error) { numSamples := 0 @@ -153,7 +131,7 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, } resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{ - Labels: labelsToLabelsProto(series.Labels()), + Labels: labelsToLabelsProto(series.Labels(), nil), Samples: samples, }) } @@ -183,6 +161,182 @@ func FromQueryResult(res *prompb.QueryResult) storage.SeriesSet { } } +// NegotiateResponseType returns first accepted response type that this server supports. +// On the empty accepted list we assume that the SAMPLES response type was requested. This is to maintain backward compatibility. +func NegotiateResponseType(accepted []prompb.ReadRequest_ResponseType) (prompb.ReadRequest_ResponseType, error) { + if len(accepted) == 0 { + accepted = []prompb.ReadRequest_ResponseType{prompb.ReadRequest_SAMPLES} + } + + supported := map[prompb.ReadRequest_ResponseType]struct{}{ + prompb.ReadRequest_SAMPLES: {}, + prompb.ReadRequest_STREAMED_XOR_CHUNKS: {}, + } + + for _, resType := range accepted { + if _, ok := supported[resType]; ok { + return resType, nil + } + } + return 0, errors.Errorf("server does not support any of the requested response types: %v; supported: %v", accepted, supported) +} + +// StreamChunkedReadResponses iterates over series, builds chunks and streams those to the caller. +// TODO(bwplotka): Encode only what's needed. Fetch the encoded series from blocks instead of re-encoding everything. +func StreamChunkedReadResponses( + stream io.Writer, + queryIndex int64, + ss storage.SeriesSet, + sortedExternalLabels []prompb.Label, + maxBytesInFrame int, +) error { + var ( + chks []prompb.Chunk + lbls []prompb.Label + err error + lblsSize int + ) + + for ss.Next() { + series := ss.At() + iter := series.Iterator() + lbls = MergeLabels(labelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels) + + lblsSize = 0 + for _, lbl := range lbls { + lblsSize += lbl.Size() + } + + // Send at most one series per frame; series may be split over multiple frames according to maxBytesInFrame. 
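+ // Each pass below gives encodeChunks a budget of maxBytesInFrame-lblsSize bytes of chunk data, so a
+ // frame stays close to the limit once labels are counted; an empty result means the iterator is exhausted.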
+ for { + // TODO(bwplotka): Use ChunkIterator once available in TSDB instead of re-encoding: https://github.com/prometheus/prometheus/pull/5882 + chks, err = encodeChunks(iter, chks, maxBytesInFrame-lblsSize) + if err != nil { + return err + } + + if len(chks) == 0 { + break + } + + b, err := proto.Marshal(&prompb.ChunkedReadResponse{ + ChunkedSeries: []*prompb.ChunkedSeries{ + { + Labels: lbls, + Chunks: chks, + }, + }, + QueryIndex: queryIndex, + }) + if err != nil { + return errors.Wrap(err, "marshal ChunkedReadResponse") + } + + if _, err := stream.Write(b); err != nil { + return errors.Wrap(err, "write to stream") + } + + chks = chks[:0] + } + + if err := iter.Err(); err != nil { + return err + } + } + if err := ss.Err(); err != nil { + return err + } + + return nil +} + +// encodeChunks expects iterator to be ready to use (aka iter.Next() called before invoking). +func encodeChunks(iter storage.SeriesIterator, chks []prompb.Chunk, frameBytesLeft int) ([]prompb.Chunk, error) { + const maxSamplesInChunk = 120 + + var ( + chkMint int64 + chkMaxt int64 + chk *chunkenc.XORChunk + app chunkenc.Appender + err error + ) + + for iter.Next() { + if chk == nil { + chk = chunkenc.NewXORChunk() + app, err = chk.Appender() + if err != nil { + return nil, err + } + chkMint, _ = iter.At() + } + + app.Append(iter.At()) + chkMaxt, _ = iter.At() + + if chk.NumSamples() < maxSamplesInChunk { + continue + } + + // Cut the chunk. + chks = append(chks, prompb.Chunk{ + MinTimeMs: chkMint, + MaxTimeMs: chkMaxt, + Type: prompb.Chunk_Encoding(chk.Encoding()), + Data: chk.Bytes(), + }) + chk = nil + frameBytesLeft -= chks[len(chks)-1].Size() + + // We are fine with minor inaccuracy of max bytes per frame. The inaccuracy will be max of full chunk size. + if frameBytesLeft <= 0 { + break + } + } + if iter.Err() != nil { + return nil, errors.Wrap(iter.Err(), "iter TSDB series") + } + + if chk != nil { + // Cut the chunk if exists. + chks = append(chks, prompb.Chunk{ + MinTimeMs: chkMint, + MaxTimeMs: chkMaxt, + Type: prompb.Chunk_Encoding(chk.Encoding()), + Data: chk.Bytes(), + }) + } + return chks, nil +} + +// MergeLabels merges two sets of sorted proto labels, preferring those in +// primary to those in secondary when there is an overlap. +func MergeLabels(primary, secondary []prompb.Label) []prompb.Label { + result := make([]prompb.Label, 0, len(primary)+len(secondary)) + i, j := 0, 0 + for i < len(primary) && j < len(secondary) { + if primary[i].Name < secondary[j].Name { + result = append(result, primary[i]) + i++ + } else if primary[i].Name > secondary[j].Name { + result = append(result, secondary[j]) + j++ + } else { + result = append(result, primary[i]) + i++ + j++ + } + } + for ; i < len(primary); i++ { + result = append(result, primary[i]) + } + for ; j < len(secondary); j++ { + result = append(result, secondary[j]) + } + return result +} + type byLabel []storage.Series func (a byLabel) Len() int { return len(a) } @@ -322,7 +476,8 @@ func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) return pbMatchers, nil } -func fromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) { +// FromLabelMatchers parses protobuf label matchers to Prometheus label matchers. 
+func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) { result := make([]*labels.Matcher, 0, len(matchers)) for _, matcher := range matchers { var mtype labels.MatchType @@ -368,12 +523,17 @@ func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels { return result } -func labelsToLabelsProto(labels labels.Labels) []prompb.Label { - result := make([]prompb.Label, 0, len(labels)) +// labelsToLabelsProto transforms labels into prompb labels. The buffer slice +// will be used to avoid allocations if it is big enough to store the labels. +func labelsToLabelsProto(labels labels.Labels, buf []prompb.Label) []prompb.Label { + result := buf[:0] + if cap(buf) < len(labels) { + result = make([]prompb.Label, 0, len(labels)) + } for _, l := range labels { result = append(result, prompb.Label{ - Name: interner.intern(l.Name), - Value: interner.intern(l.Value), + Name: l.Name, + Value: l.Value, }) } return result diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index cda8eda3b78..4ce98dfc532 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -143,6 +143,33 @@ var ( }, []string{queue}, ) + maxNumShards = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "shards_max", + Help: "The maximum number of shards that the queue is allowed to run.", + }, + []string{queue}, + ) + minNumShards = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "shards_min", + Help: "The minimum number of shards that the queue is allowed to run.", + }, + []string{queue}, + ) + desiredNumShards = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "shards_desired", + Help: "The number of shards that the queues shard calculation wants to run based on the rate of samples in vs. samples out.", + }, + []string{queue}, + ) ) // StorageClient defines an interface for sending a batch of samples to an @@ -166,7 +193,7 @@ type QueueManager struct { client StorageClient watcher *WALWatcher - seriesLabels map[uint64][]prompb.Label + seriesLabels map[uint64]labels.Labels seriesSegmentIndexes map[uint64]int droppedSeries map[uint64]struct{} @@ -190,6 +217,9 @@ type QueueManager struct { succeededSamplesTotal prometheus.Counter retriedSamplesTotal prometheus.Counter shardCapacity prometheus.Gauge + maxNumShards prometheus.Gauge + minNumShards prometheus.Gauge + desiredNumShards prometheus.Gauge } // NewQueueManager builds a new QueueManager. @@ -208,7 +238,7 @@ func NewQueueManager(logger log.Logger, walDir string, samplesIn *ewmaRate, cfg relabelConfigs: relabelConfigs, client: client, - seriesLabels: make(map[uint64][]prompb.Label), + seriesLabels: make(map[uint64]labels.Labels), seriesSegmentIndexes: make(map[uint64]int), droppedSeries: make(map[uint64]struct{}), @@ -230,15 +260,15 @@ func NewQueueManager(logger log.Logger, walDir string, samplesIn *ewmaRate, cfg // Append queues a sample to be sent to the remote storage. Blocks until all samples are // enqueued on their shards or a shutdown signal is received. 
-func (t *QueueManager) Append(s []tsdb.RefSample) bool { +func (t *QueueManager) Append(samples []tsdb.RefSample) bool { outer: - for _, sample := range s { - lbls, ok := t.seriesLabels[sample.Ref] + for _, s := range samples { + lbls, ok := t.seriesLabels[s.Ref] if !ok { t.droppedSamplesTotal.Inc() t.samplesDropped.incr(1) - if _, ok := t.droppedSeries[sample.Ref]; !ok { - level.Info(t.logger).Log("msg", "dropped sample for series that was not explicitly dropped via relabelling", "ref", sample.Ref) + if _, ok := t.droppedSeries[s.Ref]; !ok { + level.Info(t.logger).Log("msg", "dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) } continue } @@ -251,16 +281,11 @@ outer: default: } - ts := prompb.TimeSeries{ - Labels: lbls, - Samples: []prompb.Sample{ - { - Value: float64(sample.V), - Timestamp: sample.T, - }, - }, - } - if t.shards.enqueue(sample.Ref, ts) { + if t.shards.enqueue(s.Ref, sample{ + labels: lbls, + t: s.T, + v: s.V, + }) { continue outer } @@ -296,10 +321,16 @@ func (t *QueueManager) Start() { t.succeededSamplesTotal = succeededSamplesTotal.WithLabelValues(name) t.retriedSamplesTotal = retriedSamplesTotal.WithLabelValues(name) t.shardCapacity = shardCapacity.WithLabelValues(name) + t.maxNumShards = maxNumShards.WithLabelValues(name) + t.minNumShards = minNumShards.WithLabelValues(name) + t.desiredNumShards = desiredNumShards.WithLabelValues(name) // Initialise some metrics. t.shardCapacity.Set(float64(t.cfg.Capacity)) t.pendingSamplesMetric.Set(0) + t.maxNumShards.Set(float64(t.cfg.MaxShards)) + t.minNumShards.Set(float64(t.cfg.MinShards)) + t.desiredNumShards.Set(float64(t.cfg.MinShards)) t.shards.start(t.numShards) t.watcher.Start() @@ -325,7 +356,7 @@ func (t *QueueManager) Stop() { // On shutdown, release the strings in the labels from the intern pool. for _, labels := range t.seriesLabels { - release(labels) + releaseLabels(labels) } // Delete metrics so we don't have alerts for queues that are gone. name := t.client.Name() @@ -339,27 +370,30 @@ func (t *QueueManager) Stop() { succeededSamplesTotal.DeleteLabelValues(name) retriedSamplesTotal.DeleteLabelValues(name) shardCapacity.DeleteLabelValues(name) + maxNumShards.DeleteLabelValues(name) + minNumShards.DeleteLabelValues(name) + desiredNumShards.DeleteLabelValues(name) } // StoreSeries keeps track of which series we know about for lookups when sending samples to remote. func (t *QueueManager) StoreSeries(series []tsdb.RefSeries, index int) { for _, s := range series { ls := processExternalLabels(s.Labels, t.externalLabels) - rl := relabel.Process(ls, t.relabelConfigs...) - if len(rl) == 0 { + lbls := relabel.Process(ls, t.relabelConfigs...) + if len(lbls) == 0 { t.droppedSeries[s.Ref] = struct{}{} continue } t.seriesSegmentIndexes[s.Ref] = index - labels := labelsToLabelsProto(rl) + internLabels(lbls) // We should not ever be replacing a series labels in the map, but just // in case we do we need to ensure we do not leak the replaced interned // strings. 
if orig, ok := t.seriesLabels[s.Ref]; ok { - release(orig) + releaseLabels(orig) } - t.seriesLabels[s.Ref] = labels + t.seriesLabels[s.Ref] = lbls } } @@ -372,13 +406,20 @@ func (t *QueueManager) SeriesReset(index int) { for k, v := range t.seriesSegmentIndexes { if v < index { delete(t.seriesSegmentIndexes, k) - release(t.seriesLabels[k]) + releaseLabels(t.seriesLabels[k]) delete(t.seriesLabels, k) } } } -func release(ls []prompb.Label) { +func internLabels(lbls labels.Labels) { + for i, l := range lbls { + lbls[i].Name = interner.intern(l.Name) + lbls[i].Value = interner.intern(l.Value) + } +} + +func releaseLabels(ls labels.Labels) { for _, l := range ls { interner.release(l.Name) interner.release(l.Value) @@ -500,6 +541,7 @@ func (t *QueueManager) calculateDesiredShards() { } numShards := int(math.Ceil(desiredShards)) + t.desiredNumShards.Set(float64(numShards)) if numShards > t.cfg.MaxShards { numShards = t.cfg.MaxShards } else if numShards < t.cfg.MinShards { @@ -545,11 +587,17 @@ func (t *QueueManager) newShards() *shards { return s } +type sample struct { + labels labels.Labels + t int64 + v float64 +} + type shards struct { mtx sync.RWMutex // With the WAL, this is never actually contended. qm *QueueManager - queues []chan prompb.TimeSeries + queues []chan sample // Emulate a wait group with a channel and an atomic int, as you // cannot select on a wait group. @@ -569,9 +617,9 @@ func (s *shards) start(n int) { s.mtx.Lock() defer s.mtx.Unlock() - newQueues := make([]chan prompb.TimeSeries, n) + newQueues := make([]chan sample, n) for i := 0; i < n; i++ { - newQueues[i] = make(chan prompb.TimeSeries, s.qm.cfg.Capacity) + newQueues[i] = make(chan sample, s.qm.cfg.Capacity) } s.queues = newQueues @@ -619,7 +667,7 @@ func (s *shards) stop() { // enqueue a sample. If we are currently in the process of shutting down or resharding, // will return false; in this case, you should back off and retry. -func (s *shards) enqueue(ref uint64, sample prompb.TimeSeries) bool { +func (s *shards) enqueue(ref uint64, sample sample) bool { s.mtx.RLock() defer s.mtx.RUnlock() @@ -638,21 +686,24 @@ func (s *shards) enqueue(ref uint64, sample prompb.TimeSeries) bool { } } -func (s *shards) runShard(ctx context.Context, i int, queue chan prompb.TimeSeries) { +func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) { defer func() { if atomic.AddInt32(&s.running, -1) == 0 { close(s.done) } }() - shardNum := strconv.Itoa(i) + shardNum := strconv.Itoa(shardID) // Send batches of at most MaxSamplesPerSend samples to the remote storage. // If we have fewer samples than that, flush them out after a deadline // anyways. 
- max := s.qm.cfg.MaxSamplesPerSend - pendingSamples := make([]prompb.TimeSeries, 0, max) - var buf []byte + var ( + max = s.qm.cfg.MaxSamplesPerSend + nPending = 0 + pendingSamples = allocateTimeSeries(max) + buf []byte + ) timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline)) stop := func() { @@ -672,24 +723,27 @@ func (s *shards) runShard(ctx context.Context, i int, queue chan prompb.TimeSeri case sample, ok := <-queue: if !ok { - if len(pendingSamples) > 0 { - level.Debug(s.qm.logger).Log("msg", "Flushing samples to remote storage...", "count", len(pendingSamples)) - s.sendSamples(ctx, pendingSamples, &buf) - s.qm.pendingSamplesMetric.Sub(float64(len(pendingSamples))) + if nPending > 0 { + level.Debug(s.qm.logger).Log("msg", "Flushing samples to remote storage...", "count", nPending) + s.sendSamples(ctx, pendingSamples[:nPending], &buf) + s.qm.pendingSamplesMetric.Sub(float64(nPending)) level.Debug(s.qm.logger).Log("msg", "Done flushing.") } return } // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff) - // retries endlessly, so once we reach > 100 samples, if we can never send to the endpoint we'll - // stop reading from the queue (which has a size of 10). - pendingSamples = append(pendingSamples, sample) + // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll + // stop reading from the queue. This makes it safe to reference pendingSamples by index. + pendingSamples[nPending].Labels = labelsToLabelsProto(sample.labels, pendingSamples[nPending].Labels) + pendingSamples[nPending].Samples[0].Timestamp = sample.t + pendingSamples[nPending].Samples[0].Value = sample.v + nPending++ s.qm.pendingSamplesMetric.Inc() - if len(pendingSamples) >= max { - s.sendSamples(ctx, pendingSamples[:max], &buf) - pendingSamples = append(pendingSamples[:0], pendingSamples[max:]...) + if nPending >= max { + s.sendSamples(ctx, pendingSamples, &buf) + nPending = 0 s.qm.pendingSamplesMetric.Sub(float64(max)) stop() @@ -697,12 +751,11 @@ func (s *shards) runShard(ctx context.Context, i int, queue chan prompb.TimeSeri } case <-timer.C: - n := len(pendingSamples) - if n > 0 { - level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending samples", "samples", n, "shard", shardNum) - s.sendSamples(ctx, pendingSamples, &buf) - pendingSamples = pendingSamples[:0] - s.qm.pendingSamplesMetric.Sub(float64(n)) + if nPending > 0 { + level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending samples", "samples", nPending, "shard", shardNum) + s.sendSamples(ctx, pendingSamples[:nPending], &buf) + nPending = 0 + s.qm.pendingSamplesMetric.Sub(float64(nPending)) } timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) } @@ -790,3 +843,12 @@ func buildWriteRequest(samples []prompb.TimeSeries, buf []byte) ([]byte, int64, compressed := snappy.Encode(buf, data) return compressed, highest, nil } + +func allocateTimeSeries(capacity int) []prompb.TimeSeries { + timeseries := make([]prompb.TimeSeries, capacity) + // We only ever send one sample per timeseries, so preallocate with length one. 
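+ // runShard then reuses these entries in place, overwriting Labels and the single Sample for each
+ // enqueued sample instead of allocating a fresh slice per batch.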
+ for i := range timeseries { + timeseries[i].Samples = []prompb.Sample{{}} + } + return timeseries +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read.go b/vendor/github.com/prometheus/prometheus/storage/remote/read.go index 6e97ced121f..9e6135913ee 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read.go @@ -15,6 +15,7 @@ package remote import ( "context" + "fmt" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/pkg/labels" @@ -70,7 +71,7 @@ func (q *querier) Select(p *storage.SelectParams, matchers ...*labels.Matcher) ( res, err := q.client.Read(q.ctx, query) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("remote_read: %v", err) } return FromQueryResult(res), nil, nil diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go index dba8b1fd5fb..c322a4347f7 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -50,11 +50,13 @@ type WriteStorage struct { logger log.Logger mtx sync.Mutex - configHash [16]byte - walDir string - queues []*QueueManager - samplesIn *ewmaRate - flushDeadline time.Duration + configHash [16]byte + externalLabelHash [16]byte + walDir string + queues []*QueueManager + hashes [][16]byte + samplesIn *ewmaRate + flushDeadline time.Duration } // NewWriteStorage creates and runs a WriteStorage. @@ -81,6 +83,7 @@ func (rws *WriteStorage) run() { } // ApplyConfig updates the state as the new config requires. +// Only stop & recreate queues which have changed. func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { rws.mtx.Lock() defer rws.mtx.Unlock() @@ -97,19 +100,38 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { return err } - hash := md5.Sum(append(cfgBytes, externalLabelBytes...)) - if hash == rws.configHash { + configHash := md5.Sum(cfgBytes) + externalLabelHash := md5.Sum(externalLabelBytes) + externalLabelUnchanged := externalLabelHash == rws.externalLabelHash + if configHash == rws.configHash && externalLabelUnchanged { level.Debug(rws.logger).Log("msg", "remote write config has not changed, no need to restart QueueManagers") return nil } - rws.configHash = hash + rws.configHash = configHash + rws.externalLabelHash = externalLabelHash // Update write queues newQueues := []*QueueManager{} - // TODO: we should only stop & recreate queues which have changes, - // as this can be quite disruptive. + newHashes := [][16]byte{} + newClientIndexes := []int{} for i, rwConf := range conf.RemoteWriteConfigs { + b, err := json.Marshal(rwConf) + if err != nil { + return err + } + + // Use the RemoteWriteConfig and its index to compute the hash, so if its index changed, + // the corresponding queue should also be restarted. + hash := md5.Sum(b) + if i < len(rws.queues) && rws.hashes[i] == hash && externalLabelUnchanged { + // Neither the RemoteWriteConfig nor its index changed, so keep the queue. + newQueues = append(newQueues, rws.queues[i]) + newHashes = append(newHashes, hash) + rws.queues[i] = nil + continue + } + // Otherwise create a new queue.
c, err := NewClient(i, &ClientConfig{ URL: rwConf.URL, Timeout: rwConf.RemoteTimeout, @@ -128,16 +150,24 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { c, rws.flushDeadline, )) + newHashes = append(newHashes, hash) + newClientIndexes = append(newClientIndexes, i) } for _, q := range rws.queues { - q.Stop() + // A nil queue means that queue has been reused. + if q != nil { + q.Stop() + } } - rws.queues = newQueues - for _, q := range rws.queues { - q.Start() + for _, index := range newClientIndexes { + newQueues[index].Start() } + + rws.queues = newQueues + rws.hashes = newHashes + return nil } diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index 031bdbd8b8d..0a31c4e54a4 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -219,7 +219,7 @@ func NewTemplateExpander( seconds := int64(v) % 60 minutes := (int64(v) / 60) % 60 hours := (int64(v) / 60 / 60) % 24 - days := (int64(v) / 60 / 60 / 24) + days := int64(v) / 60 / 60 / 24 // For days to minutes, we display seconds as an integer. if days != 0 { return fmt.Sprintf("%s%dd %dh %dm %ds", sign, days, hours, minutes, seconds) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md b/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md index 12364b09fc1..8f23acc1907 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md +++ b/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md @@ -78,7 +78,7 @@ - [ENHANCEMENT] When closing the db any running compaction will be cancelled so it doesn't block. - `NewLeveledCompactor` takes a context. - [CHANGE] `prometheus_tsdb_storage_blocks_bytes_total` is now `prometheus_tsdb_storage_blocks_bytes`. - - [BUGFIX] Improved Postings Merge performance. Fixes a regression from the the previous release. + - [BUGFIX] Improved Postings Merge performance. Fixes a regression from the previous release. - [BUGFIX] LiveReader can get into an infinite loop on corrupt WALs. ## 0.4.0 @@ -91,7 +91,7 @@ - `OpenBlock` signature changed to take a logger. - [REMOVED] `PrefixMatcher` is considered unused so was removed. - [CLEANUP] `Options.WALFlushInterval` is removed as it wasn't used anywhere. - - [FEATURE] Add new `LiveReader` to WAL pacakge. Added to allow live tailing of a WAL segment, used by Prometheus Remote Write after refactor. The main difference between the new reader and the existing `Reader` is that for `LiveReader` a call to `Next()` that returns false does not mean that there will never be more data to read. + - [FEATURE] Add new `LiveReader` to WAL package. Added to allow live tailing of a WAL segment, used by Prometheus Remote Write after refactor. The main difference between the new reader and the existing `Reader` is that for `LiveReader` a call to `Next()` that returns false does not mean that there will never be more data to read. 
## 0.3.1 diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index b7771d6bbc3..0030fbd6854 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -201,6 +201,7 @@ type BlockMetaCompaction struct { const indexFilename = "index" const metaFilename = "meta.json" +const metaVersion1 = 1 func chunkDir(dir string) string { return filepath.Join(dir, "chunks") } @@ -214,7 +215,7 @@ func readMetaFile(dir string) (*BlockMeta, int64, error) { if err := json.Unmarshal(b, &m); err != nil { return nil, 0, err } - if m.Version != 1 { + if m.Version != metaVersion1 { return nil, 0, errors.Errorf("unexpected meta file version %d", m.Version) } @@ -222,7 +223,7 @@ func readMetaFile(dir string) (*BlockMeta, int64, error) { } func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error) { - meta.Version = 1 + meta.Version = metaVersion1 // Make any changes to the file appear atomic. path := filepath.Join(dir, metaFilename) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index c80aa070434..e29aedf5bac 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -286,6 +286,49 @@ func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) { }, nil } +// FlushWAL creates a new block containing all data that's currently in the memory buffer/WAL. +// Samples that are in existing blocks will not be written to the new block. +// Note that if the read only database is running concurrently with a +// writable database then writing the WAL to the database directory can race. +func (db *DBReadOnly) FlushWAL(dir string) error { + blockReaders, err := db.Blocks() + if err != nil { + return errors.Wrap(err, "read blocks") + } + maxBlockTime := int64(math.MinInt64) + if len(blockReaders) > 0 { + maxBlockTime = blockReaders[len(blockReaders)-1].Meta().MaxTime + } + w, err := wal.Open(db.logger, nil, filepath.Join(db.dir, "wal")) + if err != nil { + return err + } + head, err := NewHead(nil, db.logger, w, 1) + if err != nil { + return err + } + // Set the min valid time for the ingested wal samples + // to be no lower than the maxt of the last block. + if err := head.Init(maxBlockTime); err != nil { + return errors.Wrap(err, "read WAL") + } + mint := head.MinTime() + maxt := head.MaxTime() + rh := &rangeHead{ + head: head, + mint: mint, + maxt: maxt, + } + compactor, err := NewLeveledCompactor(context.Background(), nil, db.logger, DefaultOptions.BlockRanges, chunkenc.NewPool()) + if err != nil { + return errors.Wrap(err, "create leveled compactor") + } + // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). + // Because of this block intervals are always +1 than the total samples it includes. + _, err = compactor.Write(dir, rh, mint, maxt+1, nil) + return errors.Wrap(err, "writing WAL") +} + // Querier loads the wal and returns a new querier over the data partition for the given time range. // Current implementation doesn't support multiple Queriers. 
func (db *DBReadOnly) Querier(mint, maxt int64) (Querier, error) { @@ -294,12 +337,12 @@ func (db *DBReadOnly) Querier(mint, maxt int64) (Querier, error) { return nil, ErrClosed default: } - blocksReaders, err := db.Blocks() + blockReaders, err := db.Blocks() if err != nil { return nil, err } - blocks := make([]*Block, len(blocksReaders)) - for i, b := range blocksReaders { + blocks := make([]*Block, len(blockReaders)) + for i, b := range blockReaders { b, ok := b.(*Block) if !ok { return nil, errors.New("unable to convert a read only block to a normal block") @@ -380,7 +423,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { } if len(loadable) == 0 { - return nil, errors.New("no blocks found") + return nil, nil } sort.Slice(loadable, func(i, j int) bool { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 219514ffe1f..6c9975a1362 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -1046,7 +1046,7 @@ func (h *Head) gc() { h.metrics.seriesRemoved.Add(float64(seriesRemoved)) h.metrics.chunksRemoved.Add(float64(chunksRemoved)) h.metrics.chunks.Sub(float64(chunksRemoved)) - // Using AddUint64 to substract series removed. + // Using AddUint64 to subtract series removed. // See: https://golang.org/pkg/sync/atomic/#AddUint64. atomic.AddUint64(&h.numSeries, ^uint64(seriesRemoved-1)) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index aba0f4dd1d3..357232e4615 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -328,6 +328,7 @@ func findSetMatches(pattern string) []string { func PostingsForMatchers(ix IndexReader, ms ...labels.Matcher) (index.Postings, error) { var its, notIts []index.Postings // See which label must be non-empty. + // Optimization for case like {l=~".", l!="1"}. labelMustBeSet := make(map[string]bool, len(ms)) for _, m := range ms { if !m.Matches("") { @@ -336,9 +337,9 @@ func PostingsForMatchers(ix IndexReader, ms ...labels.Matcher) (index.Postings, } for _, m := range ms { - matchesEmpty := m.Matches("") - if labelMustBeSet[m.Name()] || !matchesEmpty { + if labelMustBeSet[m.Name()] { // If this matcher must be non-empty, we can be smarter. + matchesEmpty := m.Matches("") nm, isNot := m.(*labels.NotMatcher) if isNot && matchesEmpty { // l!="foo" // If the label can't be empty and is a Not and the inner matcher @@ -444,7 +445,7 @@ func postingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error return index.Merge(rit...), nil } -// inversePostingsForMatcher eeturns the postings for the series with the label name set but not matching the matcher. +// inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher. 
func inversePostingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error) { tpls, err := ix.LabelValues(m.Name()) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/repair.go b/vendor/github.com/prometheus/prometheus/tsdb/repair.go index 494f005e422..0df612d3d48 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/repair.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/repair.go @@ -55,7 +55,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { if err != nil { return wrapErr(err, d) } - if meta.Version == 1 { + if meta.Version == metaVersion1 { level.Info(logger).Log( "msg", "found healthy block", "mint", meta.MinTime, @@ -108,7 +108,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { return wrapErr(err, d) } // Reset version of meta.json to 1. - meta.Version = 1 + meta.Version = metaVersion1 if _, err := writeMetaFile(logger, d, meta); err != nil { return wrapErr(err, d) } @@ -126,7 +126,7 @@ func readBogusMetaFile(dir string) (*BlockMeta, error) { if err := json.Unmarshal(b, &m); err != nil { return nil, err } - if m.Version != 1 && m.Version != 2 { + if m.Version != metaVersion1 && m.Version != 2 { return nil, errors.Errorf("unexpected meta file version %d", m.Version) } return &m, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal.go index 4809889f32b..187feabd9df 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal.go @@ -280,7 +280,7 @@ func (w *SegmentWAL) truncate(err error, file int, lastOffset int64) error { return err } -// Reader returns a new reader over the the write ahead log data. +// Reader returns a new reader over the write ahead log data. // It must be completely consumed before writing to the WAL. func (w *SegmentWAL) Reader() WALReader { return &repairingWALReader{ diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go index 82081e321a0..6d64bb37ef9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go @@ -64,6 +64,14 @@ func (p *page) full() bool { return pageSize-p.alloc < recordHeaderSize } +func (p *page) reset() { + for i := range p.buf { + p.buf[i] = 0 + } + p.alloc = 0 + p.flushed = 0 +} + // Segment represents a segment file. type Segment struct { *os.File @@ -205,15 +213,16 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi } registerMetrics(reg, w) - _, j, err := w.Segments() - // Index of the Segment we want to open and write to. - writeSegmentIndex := 0 + _, last, err := w.Segments() if err != nil { return nil, errors.Wrap(err, "get segment range") } + + // Index of the Segment we want to open and write to. + writeSegmentIndex := 0 // If some segments already exist create one with a higher index than the last segment. - if j != -1 { - writeSegmentIndex = j + 1 + if last != -1 { + writeSegmentIndex = last + 1 } segment, err := CreateSegment(w.dir, writeSegmentIndex) @@ -401,7 +410,7 @@ func (w *WAL) Repair(origErr error) error { return errors.Wrap(err, "delete corrupted segment") } - // Explicitly close the the segment we just repaired to avoid issues with Windows. + // Explicitly close the segment we just repaired to avoid issues with Windows. 
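+ // Windows does not allow removing or renaming a file while a handle to it is still open.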
s.Close() // We always want to start writing to a new Segment rather than an existing @@ -493,11 +502,7 @@ func (w *WAL) flushPage(clear bool) error { // We flushed an entire page, prepare a new one. if clear { - for i := range p.buf { - p.buf[i] = 0 - } - p.alloc = 0 - p.flushed = 0 + p.reset() w.donePages++ w.pageCompletions.Inc() } diff --git a/vendor/github.com/prometheus/prometheus/util/httputil/cors.go b/vendor/github.com/prometheus/prometheus/util/httputil/cors.go index fdd5d3cd2c2..c8bdb3ac354 100644 --- a/vendor/github.com/prometheus/prometheus/util/httputil/cors.go +++ b/vendor/github.com/prometheus/prometheus/util/httputil/cors.go @@ -25,7 +25,7 @@ var corsHeaders = map[string]string{ "Vary": "Origin", } -// Enables cross-site script calls. +// SetCORS enables cross-site script calls. func SetCORS(w http.ResponseWriter, o *regexp.Regexp, r *http.Request) { origin := r.Header.Get("Origin") if origin == "" { diff --git a/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go b/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go index 181bdde3b94..3fea3921fa6 100644 --- a/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go +++ b/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go @@ -54,7 +54,7 @@ func (s QueryTiming) String() string { } } -// Return a string representation of a QueryTiming span operation. +// SpanOperation returns a string representation of a QueryTiming span operation. func (s QueryTiming) SpanOperation() string { switch s { case EvalTotalTime: diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go index d3c9c926f12..5f1c31554ce 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go @@ -14,8 +14,13 @@ package testutil import ( + "crypto/sha256" + "io" "io/ioutil" "os" + "path/filepath" + "strconv" + "testing" ) const ( @@ -127,3 +132,51 @@ func NewTemporaryDirectory(name string, t T) (handler TemporaryDirectory) { return } + +// DirSize returns the size in bytes of all files in a directory. +func DirSize(t *testing.T, path string) int64 { + var size int64 + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + Ok(t, err) + if !info.IsDir() { + size += info.Size() + } + return nil + }) + Ok(t, err) + return size +} + +// DirHash returns a hash of all file attributes and their content within a directory.
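+// The hash covers each file's content, size, name and modification time, so it changes whenever
+// a file in the directory is rewritten, renamed or touched.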
+func DirHash(t *testing.T, path string) []byte { + hash := sha256.New() + err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + Ok(t, err) + + if info.IsDir() { + return nil + } + f, err := os.Open(path) + Ok(t, err) + defer f.Close() + + _, err = io.Copy(hash, f) + Ok(t, err) + + _, err = io.WriteString(hash, strconv.Itoa(int(info.Size()))) + Ok(t, err) + + _, err = io.WriteString(hash, info.Name()) + Ok(t, err) + + modTime, err := info.ModTime().GobEncode() + Ok(t, err) + + _, err = io.WriteString(hash, string(modTime)) + Ok(t, err) + return nil + }) + Ok(t, err) + + return hash.Sum(nil) +} diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/logging.go b/vendor/github.com/prometheus/prometheus/util/testutil/logging.go new file mode 100644 index 00000000000..839b86690be --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/util/testutil/logging.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "testing" + + "github.com/go-kit/kit/log" +) + +type logger struct { + t *testing.T +} + +// NewLogger returns a gokit compatible Logger which calls t.Log. +func NewLogger(t *testing.T) log.Logger { + return logger{t: t} +} + +// Log implements log.Logger. +func (t logger) Log(keyvals ...interface{}) error { + t.t.Log(keyvals...) + return nil +} diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/testing.go b/vendor/github.com/prometheus/prometheus/util/testutil/testing.go index 52abce5bf6c..39b44e53617 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/testing.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/testing.go @@ -52,10 +52,11 @@ func Ok(tb TB, err error) { } // NotOk fails the test if an err is nil. -func NotOk(tb TB, err error, format string, a ...interface{}) { +func NotOk(tb TB, err error, a ...interface{}) { tb.Helper() if err == nil { if len(a) != 0 { + format := a[0].(string) tb.Fatalf("\033[31m"+format+": expected error, got none\033[39m", a...) } tb.Fatalf("\033[31mexpected error, got none\033[39m") @@ -76,7 +77,7 @@ func formatMessage(msgAndArgs []interface{}) string { } if msg, ok := msgAndArgs[0].(string); ok { - return fmt.Sprintf("\nmsg: "+msg, msgAndArgs[1:]...) + return fmt.Sprintf("\n\nmsg: "+msg, msgAndArgs[1:]...) 
} return "" } diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index d3aaaa98bf1..7d04f62ca1d 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -147,12 +147,13 @@ type API struct { flagsMap map[string]string ready func(http.HandlerFunc) http.HandlerFunc - db func() TSDBAdmin - enableAdmin bool - logger log.Logger - remoteReadSampleLimit int - remoteReadGate *gate.Gate - CORSOrigin *regexp.Regexp + db func() TSDBAdmin + enableAdmin bool + logger log.Logger + remoteReadSampleLimit int + remoteReadMaxBytesInFrame int + remoteReadGate *gate.Gate + CORSOrigin *regexp.Regexp } func init() { @@ -175,6 +176,7 @@ func NewAPI( rr rulesRetriever, remoteReadSampleLimit int, remoteReadConcurrencyLimit int, + remoteReadMaxBytesInFrame int, CORSOrigin *regexp.Regexp, ) *API { return &API{ @@ -183,17 +185,18 @@ func NewAPI( targetRetriever: tr, alertmanagerRetriever: ar, - now: time.Now, - config: configFunc, - flagsMap: flagsMap, - ready: readyFunc, - db: db, - enableAdmin: enableAdmin, - rulesRetriever: rr, - remoteReadSampleLimit: remoteReadSampleLimit, - remoteReadGate: gate.New(remoteReadConcurrencyLimit), - logger: logger, - CORSOrigin: CORSOrigin, + now: time.Now, + config: configFunc, + flagsMap: flagsMap, + ready: readyFunc, + db: db, + enableAdmin: enableAdmin, + rulesRetriever: rr, + remoteReadSampleLimit: remoteReadSampleLimit, + remoteReadGate: gate.New(remoteReadConcurrencyLimit), + remoteReadMaxBytesInFrame: remoteReadMaxBytesInFrame, + logger: logger, + CORSOrigin: CORSOrigin, } } @@ -841,7 +844,8 @@ func (api *API) serveFlags(r *http.Request) apiFuncResult { } func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) { - if err := api.remoteReadGate.Start(r.Context()); err != nil { + ctx := r.Context() + if err := api.remoteReadGate.Start(ctx); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -856,78 +860,150 @@ func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) { return } - resp := prompb.ReadResponse{ - Results: make([]*prompb.QueryResult, len(req.Queries)), + externalLabels := api.config().GlobalConfig.ExternalLabels.Map() + + sortedExternalLabels := make([]prompb.Label, 0, len(externalLabels)) + for name, value := range externalLabels { + sortedExternalLabels = append(sortedExternalLabels, prompb.Label{ + Name: string(name), + Value: string(value), + }) } - for i, query := range req.Queries { - from, through, matchers, selectParams, err := remote.FromQuery(query) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + sort.Slice(sortedExternalLabels, func(i, j int) bool { + return sortedExternalLabels[i].Name < sortedExternalLabels[j].Name + }) + + responseType, err := remote.NegotiateResponseType(req.AcceptedResponseTypes) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + switch responseType { + case prompb.ReadRequest_STREAMED_XOR_CHUNKS: + w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse") + + f, ok := w.(http.Flusher) + if !ok { + http.Error(w, "internal http.ResponseWriter does not implement http.Flusher interface", http.StatusInternalServerError) return } + for i, query := range req.Queries { + err := api.remoteReadQuery(ctx, query, externalLabels, func(set storage.SeriesSet) error { + + return remote.StreamChunkedReadResponses( 
+ remote.NewChunkedWriter(w, f), + int64(i), + set, + sortedExternalLabels, + api.remoteReadMaxBytesInFrame, + ) + }) + if err != nil { + if httpErr, ok := err.(remote.HTTPError); ok { + http.Error(w, httpErr.Error(), httpErr.Status()) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + default: + w.Header().Set("Content-Type", "application/x-protobuf") + w.Header().Set("Content-Encoding", "snappy") - querier, err := api.Queryable.Querier(r.Context(), from, through) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + // On empty or unknown types in req.AcceptedResponseTypes we default to non streamed, raw samples response. + resp := prompb.ReadResponse{ + Results: make([]*prompb.QueryResult, len(req.Queries)), } - defer querier.Close() + for i, query := range req.Queries { + err := api.remoteReadQuery(ctx, query, externalLabels, func(set storage.SeriesSet) error { - // Change equality matchers which match external labels - // to a matcher that looks for an empty label, - // as that label should not be present in the storage. - externalLabels := api.config().GlobalConfig.ExternalLabels.Map() - filteredMatchers := make([]*labels.Matcher, 0, len(matchers)) - for _, m := range matchers { - value := externalLabels[m.Name] - if m.Type == labels.MatchEqual && value == m.Value { - matcher, err := labels.NewMatcher(labels.MatchEqual, m.Name, "") + resp.Results[i], err = remote.ToQueryResult(set, api.remoteReadSampleLimit) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + return err + } + + for _, ts := range resp.Results[i].Timeseries { + ts.Labels = remote.MergeLabels(ts.Labels, sortedExternalLabels) + } + return nil + }) + if err != nil { + if httpErr, ok := err.(remote.HTTPError); ok { + http.Error(w, httpErr.Error(), httpErr.Status()) return } - filteredMatchers = append(filteredMatchers, matcher) - } else { - filteredMatchers = append(filteredMatchers, m) + http.Error(w, err.Error(), http.StatusInternalServerError) + return } } - set, _, err := querier.Select(selectParams, filteredMatchers...) - if err != nil { + if err := remote.EncodeReadResponse(&resp, w); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } - resp.Results[i], err = remote.ToQueryResult(set, api.remoteReadSampleLimit) - if err != nil { - if httpErr, ok := err.(remote.HTTPError); ok { - http.Error(w, httpErr.Error(), httpErr.Status()) - return + } +} + +// filterExtLabelsFromMatchers change equality matchers which match external labels +// to a matcher that looks for an empty label, +// as that label should not be present in the storage. +func filterExtLabelsFromMatchers(pbMatchers []*prompb.LabelMatcher, externalLabels map[string]string) ([]*labels.Matcher, error) { + matchers, err := remote.FromLabelMatchers(pbMatchers) + if err != nil { + return nil, err + } + + filteredMatchers := make([]*labels.Matcher, 0, len(matchers)) + for _, m := range matchers { + value := externalLabels[m.Name] + if m.Type == labels.MatchEqual && value == m.Value { + matcher, err := labels.NewMatcher(labels.MatchEqual, m.Name, "") + if err != nil { + return nil, err } - http.Error(w, err.Error(), http.StatusInternalServerError) - return + filteredMatchers = append(filteredMatchers, matcher) + } else { + filteredMatchers = append(filteredMatchers, m) } + } - // Add external labels back in, in sorted order. 
- sortedExternalLabels := make([]prompb.Label, 0, len(externalLabels)) - for name, value := range externalLabels { - sortedExternalLabels = append(sortedExternalLabels, prompb.Label{ - Name: string(name), - Value: string(value), - }) - } - sort.Slice(sortedExternalLabels, func(i, j int) bool { - return sortedExternalLabels[i].Name < sortedExternalLabels[j].Name - }) + return filteredMatchers, nil +} + +func (api *API) remoteReadQuery(ctx context.Context, query *prompb.Query, externalLabels map[string]string, seriesHandleFn func(set storage.SeriesSet) error) error { + filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels) + if err != nil { + return err + } + + querier, err := api.Queryable.Querier(ctx, query.StartTimestampMs, query.EndTimestampMs) + if err != nil { + return err + } - for _, ts := range resp.Results[i].Timeseries { - ts.Labels = mergeLabels(ts.Labels, sortedExternalLabels) + var selectParams *storage.SelectParams + if query.Hints != nil { + selectParams = &storage.SelectParams{ + Start: query.Hints.StartMs, + End: query.Hints.EndMs, + Step: query.Hints.StepMs, + Func: query.Hints.Func, } } - if err := remote.EncodeReadResponse(&resp, w); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + defer func() { + if err := querier.Close(); err != nil { + level.Warn(api.logger).Log("msg", "error on querier close", "err", err.Error()) + } + }() + + set, _, err := querier.Select(selectParams, filteredMatchers...) + if err != nil { + return err } + return seriesHandleFn(set) } func (api *API) deleteSeries(r *http.Request) apiFuncResult { @@ -1067,33 +1143,6 @@ func convertMatcher(m *labels.Matcher) tsdbLabels.Matcher { panic("storage.convertMatcher: invalid matcher type") } -// mergeLabels merges two sets of sorted proto labels, preferring those in -// primary to those in secondary when there is an overlap. -func mergeLabels(primary, secondary []prompb.Label) []prompb.Label { - result := make([]prompb.Label, 0, len(primary)+len(secondary)) - i, j := 0, 0 - for i < len(primary) && j < len(secondary) { - if primary[i].Name < secondary[j].Name { - result = append(result, primary[i]) - i++ - } else if primary[i].Name > secondary[j].Name { - result = append(result, secondary[j]) - j++ - } else { - result = append(result, primary[i]) - i++ - j++ - } - } - for ; i < len(primary); i++ { - result = append(result, primary[i]) - } - for ; j < len(secondary); j++ { - result = append(result, secondary[j]) - } - return result -} - func (api *API) respond(w http.ResponseWriter, data interface{}, warnings storage.Warnings) { statusMessage := statusSuccess var warningStrings []string @@ -1217,10 +1266,23 @@ func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { } stream.WriteMore() stream.WriteRaw(`"`) - stream.WriteFloat64(p.V) + + // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround + // to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan). + buf := stream.Buffer() + abs := math.Abs(p.V) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
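	// (An illustrative aside, with example values rather than code from this
	// change: the cutoff rule above reproduces encoding/json's float
	// formatting, which switches to exponent notation outside [1e-6, 1e21):
	//
	//	strconv.AppendFloat(nil, 1.234e-7, 'e', -1, 64) // "1.234e-07" (abs < 1e-6)
	//	strconv.AppendFloat(nil, 0.5, 'f', -1, 64)      // "0.5"
	//	strconv.AppendFloat(nil, 1e21, 'e', -1, 64)     // "1e+21" (abs >= 1e21)
	//
	// so the quoted sample value stays consistent with the standard library.)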
+ if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + buf = strconv.AppendFloat(buf, p.V, fmt, -1, 64) + stream.SetBuffer(buf) + stream.WriteRaw(`"`) stream.WriteArrayEnd() - } func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index aa1c2b95cdd..e0364e9e7f6 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -113,6 +113,17 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { return Error(t, err, append([]interface{}{msg}, args...)...) } +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) +} + // Exactlyf asserts that two objects are equal in value and type. // // assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) @@ -157,6 +168,31 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool return FileExists(t, path, append([]interface{}{msg}, args...)...) } +// Greaterf asserts that the first element is greater than the second +// +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Greater(t, e1, e2, append([]interface{}{msg}, args...)...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) +} + // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // @@ -289,6 +325,14 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) } +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. 
// @@ -300,6 +344,31 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf return Len(t, object, length, append([]interface{}{msg}, args...)...) } +// Lessf asserts that the first element is less than the second +// +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) +// assert.Lessf(t, "a", "b", "error message %s", "formatted") +func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Less(t, e1, e2, append([]interface{}{msg}, args...)...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) +} + // Nilf asserts that the specified object is nil. // // assert.Nilf(t, err, "error message %s", "formatted") @@ -444,6 +513,19 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) } +// Samef asserts that two pointers reference the same object. +// +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Same(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // Subsetf asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index de39f794e72..26830403a9b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -215,6 +215,28 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { return Errorf(a.t, err, msg, args...) } +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Eventually(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. 
+// +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Eventuallyf(a.t, condition, waitFor, tick, msg, args...) +} + // Exactly asserts that two objects are equal in value and type. // // a.Exactly(int32(123), int64(123)) @@ -303,6 +325,56 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) b return FileExistsf(a.t, path, msg, args...) } +// Greater asserts that the first element is greater than the second +// +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") +func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Greater(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") +func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqualf(a.t, e1, e2, msg, args...) +} + +// Greaterf asserts that the first element is greater than the second +// +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) +// a.Greaterf("b", "a", "error message %s", "formatted") +func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Greaterf(a.t, e1, e2, msg, args...) +} + // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // @@ -567,6 +639,22 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. return JSONEqf(a.t, expected, actual, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // @@ -589,6 +677,56 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in return Lenf(a.t, object, length, msg, args...) 
} +// Less asserts that the first element is less than the second +// +// a.Less(1, 2) +// a.Less(float64(1), float64(2)) +// a.Less("a", "b") +func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Less(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// a.LessOrEqual(1, 2) +// a.LessOrEqual(2, 2) +// a.LessOrEqual("a", "b") +// a.LessOrEqual("b", "b") +func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return LessOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// a.LessOrEqualf(1, 2, "error message %s", "formatted") +// a.LessOrEqualf(2, 2, "error message %s", "formatted") +// a.LessOrEqualf("a", "b", "error message %s", "formatted") +// a.LessOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return LessOrEqualf(a.t, e1, e2, msg, args...) +} + +// Lessf asserts that the first element is less than the second +// +// a.Lessf(1, 2, "error message %s", "formatted") +// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) +// a.Lessf("a", "b", "error message %s", "formatted") +func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Lessf(a.t, e1, e2, msg, args...) +} + // Nil asserts that the specified object is nil. // // a.Nil(err) @@ -877,6 +1015,32 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . return Regexpf(a.t, rx, str, msg, args...) } +// Same asserts that two pointers reference the same object. +// +// a.Same(ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Same(a.t, expected, actual, msgAndArgs...) +} + +// Samef asserts that two pointers reference the same object. +// +// a.Samef(ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Samef(a.t, expected, actual, msg, args...) +} + // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). 
// diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go new file mode 100644 index 00000000000..15a486ca6e2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -0,0 +1,309 @@ +package assert + +import ( + "fmt" + "reflect" +) + +func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) { + switch kind { + case reflect.Int: + { + intobj1 := obj1.(int) + intobj2 := obj2.(int) + if intobj1 > intobj2 { + return -1, true + } + if intobj1 == intobj2 { + return 0, true + } + if intobj1 < intobj2 { + return 1, true + } + } + case reflect.Int8: + { + int8obj1 := obj1.(int8) + int8obj2 := obj2.(int8) + if int8obj1 > int8obj2 { + return -1, true + } + if int8obj1 == int8obj2 { + return 0, true + } + if int8obj1 < int8obj2 { + return 1, true + } + } + case reflect.Int16: + { + int16obj1 := obj1.(int16) + int16obj2 := obj2.(int16) + if int16obj1 > int16obj2 { + return -1, true + } + if int16obj1 == int16obj2 { + return 0, true + } + if int16obj1 < int16obj2 { + return 1, true + } + } + case reflect.Int32: + { + int32obj1 := obj1.(int32) + int32obj2 := obj2.(int32) + if int32obj1 > int32obj2 { + return -1, true + } + if int32obj1 == int32obj2 { + return 0, true + } + if int32obj1 < int32obj2 { + return 1, true + } + } + case reflect.Int64: + { + int64obj1 := obj1.(int64) + int64obj2 := obj2.(int64) + if int64obj1 > int64obj2 { + return -1, true + } + if int64obj1 == int64obj2 { + return 0, true + } + if int64obj1 < int64obj2 { + return 1, true + } + } + case reflect.Uint: + { + uintobj1 := obj1.(uint) + uintobj2 := obj2.(uint) + if uintobj1 > uintobj2 { + return -1, true + } + if uintobj1 == uintobj2 { + return 0, true + } + if uintobj1 < uintobj2 { + return 1, true + } + } + case reflect.Uint8: + { + uint8obj1 := obj1.(uint8) + uint8obj2 := obj2.(uint8) + if uint8obj1 > uint8obj2 { + return -1, true + } + if uint8obj1 == uint8obj2 { + return 0, true + } + if uint8obj1 < uint8obj2 { + return 1, true + } + } + case reflect.Uint16: + { + uint16obj1 := obj1.(uint16) + uint16obj2 := obj2.(uint16) + if uint16obj1 > uint16obj2 { + return -1, true + } + if uint16obj1 == uint16obj2 { + return 0, true + } + if uint16obj1 < uint16obj2 { + return 1, true + } + } + case reflect.Uint32: + { + uint32obj1 := obj1.(uint32) + uint32obj2 := obj2.(uint32) + if uint32obj1 > uint32obj2 { + return -1, true + } + if uint32obj1 == uint32obj2 { + return 0, true + } + if uint32obj1 < uint32obj2 { + return 1, true + } + } + case reflect.Uint64: + { + uint64obj1 := obj1.(uint64) + uint64obj2 := obj2.(uint64) + if uint64obj1 > uint64obj2 { + return -1, true + } + if uint64obj1 == uint64obj2 { + return 0, true + } + if uint64obj1 < uint64obj2 { + return 1, true + } + } + case reflect.Float32: + { + float32obj1 := obj1.(float32) + float32obj2 := obj2.(float32) + if float32obj1 > float32obj2 { + return -1, true + } + if float32obj1 == float32obj2 { + return 0, true + } + if float32obj1 < float32obj2 { + return 1, true + } + } + case reflect.Float64: + { + float64obj1 := obj1.(float64) + float64obj2 := obj2.(float64) + if float64obj1 > float64obj2 { + return -1, true + } + if float64obj1 == float64obj2 { + return 0, true + } + if float64obj1 < float64obj2 { + return 1, true + } + } + case reflect.String: + { + stringobj1 := obj1.(string) + stringobj2 := obj2.(string) + if stringobj1 > stringobj2 { + return -1, true + } + if stringobj1 == stringobj2 { + return 0, true + } + if stringobj1 < 
stringobj2 { + return 1, true + } + } + } + + return 0, false +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 && res != 0 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// Less asserts that the first element is less than the second +// +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") +func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != 1 { + return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") +func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != 1 && res != 0 { + return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...) 
+ } + + return true +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 9bd4a80e484..044da8b01f2 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -18,6 +18,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" + yaml "gopkg.in/yaml.v2" ) //go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl @@ -350,6 +351,37 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } +// Same asserts that two pointers reference the same object. +// +// assert.Same(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual) + if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr { + return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...) + } + + expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual) + if expectedType != actualType { + return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v", + expectedType, actualType), msgAndArgs...) + } + + if expected != actual { + return Fail(t, fmt.Sprintf("Not same: \n"+ + "expected: %p %#v\n"+ + "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + } + + return true +} + // formatUnequalValues takes two values of arbitrary types and returns string // representations appropriate to be presented to the user. 
// @@ -479,14 +511,14 @@ func isEmpty(object interface{}) bool { // collection types are empty when they have no element case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -629,7 +661,7 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ func includeElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) + listKind := reflect.TypeOf(list).Kind() defer func() { if e := recover(); e != nil { ok = false @@ -637,11 +669,12 @@ func includeElement(list interface{}, element interface{}) (ok, found bool) { } }() - if reflect.TypeOf(list).Kind() == reflect.String { + if listKind == reflect.String { + elementValue := reflect.ValueOf(element) return true, strings.Contains(listValue.String(), elementValue.String()) } - if reflect.TypeOf(list).Kind() == reflect.Map { + if listKind == reflect.Map { mapKeys := listValue.MapKeys() for i := 0; i < len(mapKeys); i++ { if ObjectsAreEqual(mapKeys[i].Interface(), element) { @@ -1337,6 +1370,24 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) } +// YAMLEq asserts that two YAML strings are equivalent. +func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + var expectedYAMLAsInterface, actualYAMLAsInterface interface{} + + if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...) +} + func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { t := reflect.TypeOf(v) k := t.Kind() @@ -1371,8 +1422,8 @@ func diff(expected interface{}, actual interface{}) string { e = spewConfig.Sdump(expected) a = spewConfig.Sdump(actual) } else { - e = expected.(string) - a = actual.(string) + e = reflect.ValueOf(expected).String() + a = reflect.ValueOf(actual).String() } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1414,3 +1465,34 @@ var spewConfig = spew.ConfigState{ type tHelper interface { Helper() } + +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. 
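// (An illustrative aside showing assumed usage of the new polling assertion,
// not code from this change: a test can wait on a readiness flag instead of
// sleeping, e.g.
//
//	assert.Eventually(t, srv.Ready, 5*time.Second, 100*time.Millisecond)
//
// where srv.Ready is a hypothetical func() bool.)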
+// +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + timer := time.NewTimer(waitFor) + ticker := time.NewTicker(tick) + checkPassed := make(chan bool) + defer timer.Stop() + defer ticker.Stop() + defer close(checkPassed) + for { + select { + case <-timer.C: + return Fail(t, "Condition never satisfied", msgAndArgs...) + case result := <-checkPassed: + if result { + return true + } + case <-ticker.C: + go func() { + checkPassed <- condition() + }() + } + } +} diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 535f293490c..c5903f5dbb8 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -14,23 +14,23 @@ import ( // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { - if assert.Condition(t, comp, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Condition(t, comp, msgAndArgs...) { + return + } t.FailNow() } // Conditionf uses a Comparison to assert a complex condition. func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interface{}) { - if assert.Conditionf(t, comp, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Conditionf(t, comp, msg, args...) { + return + } t.FailNow() } @@ -41,12 +41,12 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // assert.Contains(t, ["Hello", "World"], "World") // assert.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { - if assert.Contains(t, s, contains, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Contains(t, s, contains, msgAndArgs...) { + return + } t.FailNow() } @@ -57,34 +57,34 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") // assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { - if assert.Containsf(t, s, contains, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Containsf(t, s, contains, msg, args...) { + return + } t.FailNow() } // DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { - if assert.DirExists(t, path, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.DirExists(t, path, msgAndArgs...) { + return + } t.FailNow() } // DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { - if assert.DirExistsf(t, path, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.DirExistsf(t, path, msg, args...) 
{ + return + } t.FailNow() } @@ -94,12 +94,12 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // // assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { - if assert.ElementsMatch(t, listA, listB, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.ElementsMatch(t, listA, listB, msgAndArgs...) { + return + } t.FailNow() } @@ -109,12 +109,12 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // // assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { - if assert.ElementsMatchf(t, listA, listB, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.ElementsMatchf(t, listA, listB, msg, args...) { + return + } t.FailNow() } @@ -123,12 +123,12 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // // assert.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.Empty(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Empty(t, object, msgAndArgs...) { + return + } t.FailNow() } @@ -137,12 +137,12 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // // assert.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.Emptyf(t, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Emptyf(t, object, msg, args...) { + return + } t.FailNow() } @@ -154,12 +154,12 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.Equal(t, expected, actual, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Equal(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -169,12 +169,12 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // actualObj, err := SomeFunction() // assert.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { - if assert.EqualError(t, theError, errString, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualError(t, theError, errString, msgAndArgs...) { + return + } t.FailNow() } @@ -184,12 +184,12 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // actualObj, err := SomeFunction() // assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { - if assert.EqualErrorf(t, theError, errString, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualErrorf(t, theError, errString, msg, args...) 
{ + return + } t.FailNow() } @@ -198,12 +198,12 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.EqualValues(t, expected, actual, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualValues(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -212,12 +212,12 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg // // assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.EqualValuesf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualValuesf(t, expected, actual, msg, args...) { + return + } t.FailNow() } @@ -229,12 +229,12 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.Equalf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Equalf(t, expected, actual, msg, args...) { + return + } t.FailNow() } @@ -245,12 +245,12 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // assert.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) { - if assert.Error(t, err, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Error(t, err, msgAndArgs...) { + return + } t.FailNow() } @@ -261,9 +261,37 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) { // assert.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.Errorf(t, err, msg, args...) { return } + t.FailNow() +} + +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) { + return + } + if h, ok := t.(tHelper); ok { + h.Helper() + } + t.FailNow() +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) { + return + } if h, ok := t.(tHelper); ok { h.Helper() } @@ -274,12 +302,12 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // // assert.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.Exactly(t, expected, actual, msgAndArgs...) 
{ - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Exactly(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -287,56 +315,56 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // // assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.Exactlyf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Exactlyf(t, expected, actual, msg, args...) { + return + } t.FailNow() } // Fail reports a failure through func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { - if assert.Fail(t, failureMessage, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Fail(t, failureMessage, msgAndArgs...) { + return + } t.FailNow() } // FailNow fails test func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { - if assert.FailNow(t, failureMessage, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.FailNow(t, failureMessage, msgAndArgs...) { + return + } t.FailNow() } // FailNowf fails test func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) { - if assert.FailNowf(t, failureMessage, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.FailNowf(t, failureMessage, msg, args...) { + return + } t.FailNow() } // Failf reports a failure through func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { - if assert.Failf(t, failureMessage, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Failf(t, failureMessage, msg, args...) { + return + } t.FailNow() } @@ -344,12 +372,12 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // // assert.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { - if assert.False(t, value, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.False(t, value, msgAndArgs...) { + return + } t.FailNow() } @@ -357,34 +385,96 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // // assert.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { - if assert.Falsef(t, value, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Falsef(t, value, msg, args...) { + return + } t.FailNow() } // FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { - if assert.FileExists(t, path, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.FileExists(t, path, msgAndArgs...) { + return + } t.FailNow() } // FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.FileExistsf(t, path, msg, args...) 
{ return } + t.FailNow() +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Greater(t, e1, e2, msgAndArgs...) { + return + } + t.FailNow() +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.GreaterOrEqual(t, e1, e2, msgAndArgs...) { + return + } + t.FailNow() +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.GreaterOrEqualf(t, e1, e2, msg, args...) { + return + } + t.FailNow() +} + +// Greaterf asserts that the first element is greater than the second +// +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Greaterf(t, e1, e2, msg, args...) { + return + } t.FailNow() } @@ -395,12 +485,12 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { - if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { + return + } t.FailNow() } @@ -411,12 +501,12 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { - if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) { + return + } t.FailNow() } @@ -427,12 +517,12 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // // Returns whether the assertion was successful (true) or not (false). 
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { - if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { + return + } t.FailNow() } @@ -443,12 +533,12 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { - if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { + return + } t.FailNow() } @@ -458,12 +548,12 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // // Returns whether the assertion was successful (true) or not (false). func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if assert.HTTPError(t, handler, method, url, values, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPError(t, handler, method, url, values, msgAndArgs...) { + return + } t.FailNow() } @@ -473,12 +563,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if assert.HTTPErrorf(t, handler, method, url, values, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPErrorf(t, handler, method, url, values, msg, args...) { + return + } t.FailNow() } @@ -488,12 +578,12 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) { + return + } t.FailNow() } @@ -503,12 +593,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) { + return + } t.FailNow() } @@ -518,12 +608,12 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // // Returns whether the assertion was successful (true) or not (false). 
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) { + return + } t.FailNow() } @@ -533,12 +623,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) { + return + } t.FailNow() } @@ -546,12 +636,12 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // // assert.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { - if assert.Implements(t, interfaceObject, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Implements(t, interfaceObject, object, msgAndArgs...) { + return + } t.FailNow() } @@ -559,12 +649,12 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // // assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { - if assert.Implementsf(t, interfaceObject, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Implementsf(t, interfaceObject, object, msg, args...) { + return + } t.FailNow() } @@ -572,56 +662,56 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // // assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if assert.InDelta(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDelta(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } // InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func InDeltaMapValues(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } // InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) { + return + } t.FailNow() } // InDeltaSlice is the same as InDelta, except it compares two slices. 
func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } // InDeltaSlicef is the same as InDelta, except it compares two slices. func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) { + return + } t.FailNow() } @@ -629,132 +719,216 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // // assert.InDeltaf(t, math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if assert.InDeltaf(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaf(t, expected, actual, delta, msg, args...) { + return + } t.FailNow() } // InEpsilon asserts that expected and actual have a relative error less than epsilon func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - if assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { + return + } t.FailNow() } // InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - if assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { + return + } t.FailNow() } // InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { - if assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) { + return + } t.FailNow() } // InEpsilonf asserts that expected and actual have a relative error less than epsilon func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { - if assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) { + return + } t.FailNow() } // IsType asserts that the specified objects are of the same type. func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { - if assert.IsType(t, expectedType, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.IsType(t, expectedType, object, msgAndArgs...) { + return + } t.FailNow() } // IsTypef asserts that the specified objects are of the same type. 
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.IsTypef(t, expectedType, object, msg, args...) { return } + t.FailNow() +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEq(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEqf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + +// YAMLEq asserts that two YAML strings are equivalent. +func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEq(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEqf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() does not accept. +// +// assert.Len(t, mySlice, 3) +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Len(t, object, length, msgAndArgs...) { + return + } + t.FailNow() +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() does not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Lenf(t, object, length, msg, args...) { + return + } t.FailNow() } -// JSONEq asserts that two JSON strings are equivalent. +// Less asserts that the first element is less than the second // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { - if assert.JSONEq(t, expected, actual, msgAndArgs...) { - return - } +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") +func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Less(t, e1, e2, msgAndArgs...) { + return + } t.FailNow() } -// JSONEqf asserts that two JSON strings are equivalent. +// LessOrEqual asserts that the first element is less than or equal to the second // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { - if assert.JSONEqf(t, expected, actual, msg, args...) 
{ - return - } +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") +func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.LessOrEqual(t, e1, e2, msgAndArgs...) { + return + } t.FailNow() } -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. +// LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.Len(t, mySlice, 3) -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { - if assert.Len(t, object, length, msgAndArgs...) { - return - } +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.LessOrEqualf(t, e1, e2, msg, args...) { + return + } t.FailNow() } -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. +// Lessf asserts that the first element is less than the second // -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") -func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { - if assert.Lenf(t, object, length, msg, args...) { - return - } +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// assert.Lessf(t, "a", "b", "error message %s", "formatted") +func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Lessf(t, e1, e2, msg, args...) { + return + } t.FailNow() } @@ -762,12 +936,12 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // // assert.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.Nil(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Nil(t, object, msgAndArgs...) { + return + } t.FailNow() } @@ -775,12 +949,12 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // // assert.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.Nilf(t, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Nilf(t, object, msg, args...) { + return + } t.FailNow() } @@ -791,12 +965,12 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { // assert.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { - if assert.NoError(t, err, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NoError(t, err, msgAndArgs...) { + return + } t.FailNow() } @@ -807,12 +981,12 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // assert.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { - if assert.NoErrorf(t, err, msg, args...) 
{ - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NoErrorf(t, err, msg, args...) { + return + } t.FailNow() } @@ -823,12 +997,12 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { // assert.NotContains(t, ["Hello", "World"], "Earth") // assert.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { - if assert.NotContains(t, s, contains, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotContains(t, s, contains, msgAndArgs...) { + return + } t.FailNow() } @@ -839,12 +1013,12 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") // assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { - if assert.NotContainsf(t, s, contains, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotContainsf(t, s, contains, msg, args...) { + return + } t.FailNow() } @@ -855,12 +1029,12 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a // assert.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.NotEmpty(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEmpty(t, object, msgAndArgs...) { + return + } t.FailNow() } @@ -871,12 +1045,12 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // assert.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.NotEmptyf(t, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEmptyf(t, object, msg, args...) { + return + } t.FailNow() } @@ -887,12 +1061,12 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.NotEqual(t, expected, actual, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEqual(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -903,12 +1077,12 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.NotEqualf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEqualf(t, expected, actual, msg, args...) { + return + } t.FailNow() } @@ -916,12 +1090,12 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, // // assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.NotNil(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotNil(t, object, msgAndArgs...) 
{ + return + } t.FailNow() } @@ -929,12 +1103,12 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // // assert.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.NotNilf(t, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotNilf(t, object, msg, args...) { + return + } t.FailNow() } @@ -942,12 +1116,12 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // // assert.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if assert.NotPanics(t, f, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotPanics(t, f, msgAndArgs...) { + return + } t.FailNow() } @@ -955,12 +1129,12 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // // assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { - if assert.NotPanicsf(t, f, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotPanicsf(t, f, msg, args...) { + return + } t.FailNow() } @@ -969,12 +1143,12 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") // assert.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if assert.NotRegexp(t, rx, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotRegexp(t, rx, str, msgAndArgs...) { + return + } t.FailNow() } @@ -983,12 +1157,12 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") // assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { - if assert.NotRegexpf(t, rx, str, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotRegexpf(t, rx, str, msg, args...) { + return + } t.FailNow() } @@ -997,12 +1171,12 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // // assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { - if assert.NotSubset(t, list, subset, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotSubset(t, list, subset, msgAndArgs...) { + return + } t.FailNow() } @@ -1011,34 +1185,34 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { - if assert.NotSubsetf(t, list, subset, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotSubsetf(t, list, subset, msg, args...) { + return + } t.FailNow() } // NotZero asserts that i is not the zero value for its type. func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if assert.NotZero(t, i, msgAndArgs...) 
{ - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotZero(t, i, msgAndArgs...) { + return + } t.FailNow() } // NotZerof asserts that i is not the zero value for its type. func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { - if assert.NotZerof(t, i, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotZerof(t, i, msg, args...) { + return + } t.FailNow() } @@ -1046,12 +1220,12 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // // assert.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if assert.Panics(t, f, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Panics(t, f, msgAndArgs...) { + return + } t.FailNow() } @@ -1060,12 +1234,12 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // // assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if assert.PanicsWithValue(t, expected, f, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.PanicsWithValue(t, expected, f, msgAndArgs...) { + return + } t.FailNow() } @@ -1074,12 +1248,12 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // // assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { - if assert.PanicsWithValuef(t, expected, f, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.PanicsWithValuef(t, expected, f, msg, args...) { + return + } t.FailNow() } @@ -1087,12 +1261,12 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // // assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { - if assert.Panicsf(t, f, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Panicsf(t, f, msg, args...) { + return + } t.FailNow() } @@ -1101,12 +1275,12 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // assert.Regexp(t, regexp.MustCompile("start"), "it's starting") // assert.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if assert.Regexp(t, rx, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Regexp(t, rx, str, msgAndArgs...) { + return + } t.FailNow() } @@ -1115,12 +1289,44 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") // assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.Regexpf(t, rx, str, msg, args...) { return } + t.FailNow() +} + +// Same asserts that two pointers reference the same object. +// +// assert.Same(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. 
+func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Same(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// Samef asserts that two pointers reference the same object. +// +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Samef(t, expected, actual, msg, args...) { + return + } t.FailNow() } @@ -1129,12 +1335,12 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // // assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { - if assert.Subset(t, list, subset, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Subset(t, list, subset, msgAndArgs...) { + return + } t.FailNow() } @@ -1143,12 +1349,12 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // // assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { - if assert.Subsetf(t, list, subset, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Subsetf(t, list, subset, msg, args...) { + return + } t.FailNow() } @@ -1156,12 +1362,12 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // // assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { - if assert.True(t, value, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.True(t, value, msgAndArgs...) { + return + } t.FailNow() } @@ -1169,12 +1375,12 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // // assert.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { - if assert.Truef(t, value, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Truef(t, value, msg, args...) { + return + } t.FailNow() } @@ -1182,12 +1388,12 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // // assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { - if assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } @@ -1195,33 +1401,33 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // // assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { - if assert.WithinDurationf(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.WithinDurationf(t, expected, actual, delta, msg, args...) 
{ + return + } t.FailNow() } // Zero asserts that i is the zero value for its type. func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if assert.Zero(t, i, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Zero(t, i, msgAndArgs...) { + return + } t.FailNow() } // Zerof asserts that i is the zero value for its type. func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) { - if assert.Zerof(t, i, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Zerof(t, i, msg, args...) { + return + } t.FailNow() } diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl index 6ffc751b5e5..55e42ddebdc 100644 --- a/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,6 +1,6 @@ {{.Comment}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { - if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } t.FailNow() } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 9fe41dbdc0c..804fae035bc 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -216,6 +216,28 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { Errorf(a.t, err, msg, args...) } +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Eventually(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Eventuallyf(a.t, condition, waitFor, tick, msg, args...) +} + // Exactly asserts that two objects are equal in value and type. // // a.Exactly(int32(123), int64(123)) @@ -304,6 +326,56 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { FileExistsf(a.t, path, msg, args...) } +// Greater asserts that the first element is greater than the second +// +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") +func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Greater(a.t, e1, e2, msgAndArgs...) 
+} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") +func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + GreaterOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + GreaterOrEqualf(a.t, e1, e2, msg, args...) +} + +// Greaterf asserts that the first element is greater than the second +// +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") +// a.Greaterf("b", "a", "error message %s", "formatted") +func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Greaterf(a.t, e1, e2, msg, args...) +} + // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // @@ -568,6 +640,22 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. JSONEqf(a.t, expected, actual, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // @@ -590,6 +678,56 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in Lenf(a.t, object, length, msg, args...) } +// Less asserts that the first element is less than the second +// +// a.Less(1, 2) +// a.Less(float64(1), float64(2)) +// a.Less("a", "b") +func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Less(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// a.LessOrEqual(1, 2) +// a.LessOrEqual(2, 2) +// a.LessOrEqual("a", "b") +// a.LessOrEqual("b", "b") +func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + LessOrEqual(a.t, e1, e2, msgAndArgs...) 
+} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// a.LessOrEqualf(1, 2, "error message %s", "formatted") +// a.LessOrEqualf(2, 2, "error message %s", "formatted") +// a.LessOrEqualf("a", "b", "error message %s", "formatted") +// a.LessOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + LessOrEqualf(a.t, e1, e2, msg, args...) +} + +// Lessf asserts that the first element is less than the second +// +// a.Lessf(1, 2, "error message %s", "formatted") +// a.Lessf(float64(1), float64(2), "error message %s", "formatted") +// a.Lessf("a", "b", "error message %s", "formatted") +func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Lessf(a.t, e1, e2, msg, args...) +} + // Nil asserts that the specified object is nil. // // a.Nil(err) @@ -878,6 +1016,32 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . Regexpf(a.t, rx, str, msg, args...) } +// Same asserts that two pointers reference the same object. +// +// a.Same(ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Same(a.t, expected, actual, msgAndArgs...) +} + +// Samef asserts that two pointers reference the same object. +// +// a.Samef(ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Samef(a.t, expected, actual, msg, args...) +} + // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). 
// diff --git a/vendor/modules.txt b/vendor/modules.txt index 793e0548dae..4af1ba7c1bc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -40,7 +40,7 @@ github.com/alecthomas/units github.com/armon/go-metrics # github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.22.4 +# github.com/aws/aws-sdk-go v1.23.12 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/awsutil @@ -63,6 +63,7 @@ github.com/aws/aws-sdk-go/aws/signer/v4 github.com/aws/aws-sdk-go/internal/ini github.com/aws/aws-sdk-go/internal/s3err github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkmath github.com/aws/aws-sdk-go/internal/sdkrand github.com/aws/aws-sdk-go/internal/sdkuri github.com/aws/aws-sdk-go/internal/shareddefaults @@ -377,7 +378,7 @@ github.com/prometheus/client_golang/prometheus/promauto github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.6.0 +# github.com/prometheus/common v0.7.0 github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -387,7 +388,7 @@ github.com/prometheus/common/version # github.com/prometheus/procfs v0.0.3 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs -# github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f +# github.com/prometheus/prometheus v1.8.2-0.20190918104050-8744afdd1ea0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery github.com/prometheus/prometheus/discovery/azure @@ -463,7 +464,7 @@ github.com/sirupsen/logrus github.com/soheilhy/cmux # github.com/spf13/pflag v1.0.3 github.com/spf13/pflag -# github.com/stretchr/testify v1.3.0 +# github.com/stretchr/testify v1.4.0 github.com/stretchr/testify/assert github.com/stretchr/testify/require # github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d
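
For reference, the require.go.tmpl hunk above reorders the generated helpers so that h.Helper() is registered before the wrapped assert call can fail, which keeps failure output attributed to the caller's line; the regenerated require.go and require_forward.go diffs follow from that template plus the new testify v1.4.0 assertions (the Greater/Less family, Same, YAMLEq, Eventually). A minimal, hypothetical test sketching how tests could use the new helpers once this bump lands — the package name and values are illustrative, not taken from the Cortex tree:

package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestTestify140Helpers(t *testing.T) {
	// New ordering assertions; both operands must share a comparable type.
	require.Greater(t, 2, 1)
	require.LessOrEqual(t, "a", "b")

	// Same checks pointer identity (same type and same address),
	// unlike Equal, which compares the values being pointed at.
	p := &struct{ n int }{n: 1}
	require.Same(t, p, p)

	// YAMLEq compares YAML documents structurally, ignoring key order.
	require.YAMLEq(t, "a: 1\nb: 2\n", "b: 2\na: 1\n")

	// Eventually polls the condition every tick and fails the test
	// if it has not returned true before waitFor elapses.
	start := time.Now()
	require.Eventually(t, func() bool {
		return time.Since(start) > 20*time.Millisecond
	}, time.Second, 10*time.Millisecond)
}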