diff --git a/cmd/clusterctl/examples/openstack/provider-component/cluster-api/manager_image_patch.yaml b/cmd/clusterctl/examples/openstack/provider-component/cluster-api/manager_image_patch.yaml index a2ba4568c5..5405548e2a 100644 --- a/cmd/clusterctl/examples/openstack/provider-component/cluster-api/manager_image_patch.yaml +++ b/cmd/clusterctl/examples/openstack/provider-component/cluster-api/manager_image_patch.yaml @@ -7,5 +7,5 @@ spec: template: spec: containers: - - image: gcr.io/k8s-cluster-api/cluster-api-controller:v0.1.4 + - image: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.1.9 name: manager diff --git a/go.mod b/go.mod index b026fece6e..4a2bb171a9 100644 --- a/go.mod +++ b/go.mod @@ -3,86 +3,30 @@ module sigs.k8s.io/cluster-api-provider-openstack go 1.12 require ( - cloud.google.com/go v0.40.0 // indirect - github.com/Azure/go-autorest/autorest v0.5.0 // indirect github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 // indirect github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd // indirect - github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 // indirect github.com/coreos/container-linux-config-transpiler v0.9.0 - github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e // indirect - github.com/coreos/ignition v0.32.0 // indirect - github.com/go-logr/logr v0.1.0 // indirect - github.com/go-logr/zapr v0.1.1 // indirect - github.com/gobuffalo/envy v1.7.0 // indirect - github.com/gogo/protobuf v1.2.1 // indirect - github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect - github.com/google/btree v1.0.0 // indirect - github.com/google/gofuzz v1.0.0 // indirect - github.com/google/uuid v1.1.1 // indirect - github.com/googleapis/gnostic v0.3.0 // indirect + github.com/coreos/ignition v0.33.0 // indirect github.com/gophercloud/gophercloud v0.3.0 github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03 - github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/imdario/mergo v0.3.7 // indirect - github.com/markbates/inflect v1.0.4 // indirect - github.com/pborman/uuid v1.2.0 // indirect - github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.8.1 - github.com/prometheus/client_golang v0.9.4 // indirect - github.com/rogpeppe/go-internal v1.3.0 // indirect - github.com/spf13/cobra v0.0.5 // indirect github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb // indirect - go.uber.org/atomic v1.4.0 // indirect - go.uber.org/multierr v1.1.0 // indirect - go.uber.org/zap v1.10.0 // indirect - go4.org v0.0.0-20190313082347-94abd6928b1d // indirect - golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 // indirect - golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect - golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f // indirect - golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect - golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 // indirect - google.golang.org/appengine v1.6.1 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.2.2 - k8s.io/api v0.0.0 - k8s.io/apiextensions-apiserver v0.0.0-00010101000000-000000000000 // indirect - k8s.io/apimachinery v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/cluster-bootstrap v0.0.0 - k8s.io/code-generator v0.0.0 - k8s.io/component-base 
v0.0.0-00010101000000-000000000000 // indirect - k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a // indirect - k8s.io/klog v0.3.3 - k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 // indirect + k8s.io/api v0.0.0-20190814101207-0772a1bdf941 + k8s.io/apimachinery v0.0.0-20190814100815-533d101be9a6 + k8s.io/client-go v10.0.0+incompatible + k8s.io/cluster-bootstrap v0.0.0-20190814110523-aeed25068f87 + k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b + k8s.io/klog v0.4.0 k8s.io/kubernetes v1.13.4 - k8s.io/utils v0.0.0-20190801114015-581e00157fb1 // indirect - sigs.k8s.io/cluster-api v0.1.4 + sigs.k8s.io/cluster-api v0.1.9 sigs.k8s.io/controller-runtime v0.1.12 - sigs.k8s.io/controller-tools v0.1.11 + sigs.k8s.io/controller-tools v0.1.12 sigs.k8s.io/testing_frameworks v0.1.1 sigs.k8s.io/yaml v1.1.0 ) replace ( k8s.io/api => k8s.io/api v0.0.0-20190222213804-5cb15d344471 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 - k8s.io/apiserver => k8s.io/apiserver v0.0.0-20190228174905-79427f02047f - k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20190228180923-a9e421a79326 - k8s.io/client-go => k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.0.0-20190228181926-f531db154915 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.0.0-20190228181738-e96ff33745e4 - k8s.io/code-generator => k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b - k8s.io/component-base => k8s.io/component-base v0.0.0-20190620085130-185d68e6e6ea - k8s.io/cri-api => k8s.io/cri-api v0.0.0-20190531030430-6117653b35f1 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.0.0-20190620090116-299a7b270edc - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.0.0-20190228175259-3e0149950b0e - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.0.0-20190228181625-2d4c0aa6bcf3 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.0.0-20190228181220-93400f02525d - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.0.0-20190228181501-284499a24813 - k8s.io/kubelet => k8s.io/kubelet v0.0.0-20190228181339-b2baec5a487a - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.0.0-20190620090156-2138f2c9de18 - k8s.io/metrics => k8s.io/metrics v0.0.0-20190228180609-34472d076c30 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.0.0-20190228175645-7167784fdc46 ) diff --git a/go.sum b/go.sum index 80137b534f..6e93bf03dc 100644 --- a/go.sum +++ b/go.sum @@ -1,111 +1,265 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.40.0 h1:FjSY7bOj+WzJe6TZRVtXI2b9kAYvtNg4lMbcH2+MUkk= -cloud.google.com/go v0.40.0/go.mod h1:Tk58MuI9rbLMKlAjeO/bDnteAx7tX2gJIXw4T5Jwlro= -contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= -contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= -github.com/Azure/go-autorest/autorest v0.5.0 h1:Mlm9qy2fpQ9MvfyI41G2Zf5B4CsgjjNbLOWszfK6KrY= -github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw= -github.com/Azure/go-autorest/autorest/adal v0.2.0 
h1:7IBDu1jgh+ADHXnEYExkV9RE/ztOOlxdACkkPRthGKw= -github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= -github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +cloud.google.com/go v0.35.1 h1:LMe/Btq0Eijsc97JyBwMc0KMXOe0orqAMdg7/EkywN8= +cloud.google.com/go v0.35.1/go.mod h1:wfjPZNvXCBYESy3fIynybskMP48KVPrjSPCnXiK7Prg= +contrib.go.opencensus.io/exporter/ocagent v0.2.0 h1:Q/jXnVbliDYozuWJni9452xsSUuo+y8yrioxRgofBhE= +contrib.go.opencensus.io/exporter/ocagent v0.2.0/go.mod h1:0fnkYHF+ORKj7HWzOExKkUHeFX79gXSKUQbpnAM+wzo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/Azure/go-autorest v11.5.0+incompatible h1:zp9GQJhEX+EBqEYC2MEGQ+gjKFEPRAWtfwcmstS2hGk= +github.com/Azure/go-autorest v11.5.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 h1:uwcvnXW76Y0rHM+qs7y8iHknWUWXYFNlD6FEVhc47TU= github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd h1:NlKlOv3aVJ5ODMC0JWPvddw05KENkL3cZttIuu8kJRo= github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd/go.mod h1:idhzw68Q7v4j+rQ2AGyq3OlZW2Jij9mdmGA4/Sk6J0E= +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= 
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.1.0 h1:VwZ9smxzX8u14/125wHIX7ARV+YhR+L4JADswwxWK0Y= +github.com/census-instrumentation/opencensus-proto v0.1.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= github.com/coreos/container-linux-config-transpiler v0.9.0 h1:UBGpT8qWqzi48hNLrzMAgAUNJsR0LW8Gk5/dR/caI8U= github.com/coreos/container-linux-config-transpiler v0.9.0/go.mod h1:SlcxXZQ2c42knj8pezMiQsM1f+ADxFMjGetuMKR/YSQ= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= 
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/ignition v0.32.0 h1:uMFA1tWr7B9kq2Ku4Bmo61G4gUm/vHsOuognuYaS0yg= -github.com/coreos/ignition v0.32.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d h1:t5Wuyh53qYyg9eqn4BbnlIT+vmhyww0TatL+zT3uWgI= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/ignition v0.33.0 h1:rYJoGv5v/5rCJAzyMaE9gU8pn7w7pv0M4rDzHvDK6T4= +github.com/coreos/ignition v0.33.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/evanphx/json-patch v4.0.0+incompatible h1:xregGRMLBeuRcwiOTHRCsPPuzCQlqhxUPbqdw+zNkLc= github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-kit/kit 
v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/zapr v0.1.1 h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE= -github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY= +github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4= +github.com/gobuffalo/buffalo-plugins v1.0.2/go.mod h1:pOp/uF7X3IShFHyobahTkTLZaeUXwb0GrUTb9ngJWTs= +github.com/gobuffalo/buffalo-plugins v1.0.4/go.mod h1:pWS1vjtQ6uD17MVFWf7i3zfThrEKWlI5+PYLw/NaDB4= +github.com/gobuffalo/buffalo-plugins v1.4.3/go.mod h1:uCzTY0woez4nDMdQjkcOYKanngeUVRO2HZi7ezmAjWY= +github.com/gobuffalo/buffalo-plugins v1.5.1/go.mod h1:jbmwSZK5+PiAP9cC09VQOrGMZFCa/P0UMlIS3O12r5w= +github.com/gobuffalo/buffalo-plugins v1.6.4/go.mod h1:/+N1aophkA2jZ1ifB2O3Y9yGwu6gKOVMtUmJnbg+OZI= +github.com/gobuffalo/buffalo-plugins v1.6.5/go.mod h1:0HVkbgrVs/MnPZ/FOseDMVanCTm2RNcdM0PuXcL1NNI= +github.com/gobuffalo/buffalo-plugins v1.6.7/go.mod h1:ZGZRkzz2PiKWHs0z7QsPBOTo2EpcGRArMEym6ghKYgk= +github.com/gobuffalo/buffalo-plugins v1.6.9/go.mod h1:yYlYTrPdMCz+6/+UaXg5Jm4gN3xhsvsQ2ygVatZV5vw= +github.com/gobuffalo/buffalo-plugins v1.6.11/go.mod h1:eAA6xJIL8OuynJZ8amXjRmHND6YiusVAaJdHDN1Lu8Q= +github.com/gobuffalo/buffalo-plugins v1.8.2/go.mod h1:9te6/VjEQ7pKp7lXlDIMqzxgGpjlKoAcAANdCgoR960= +github.com/gobuffalo/buffalo-plugins v1.8.3/go.mod h1:IAWq6vjZJVXebIq2qGTLOdlXzmpyTZ5iJG5b59fza5U= +github.com/gobuffalo/buffalo-plugins v1.9.4/go.mod h1:grCV6DGsQlVzQwk6XdgcL3ZPgLm9BVxlBmXPMF8oBHI= +github.com/gobuffalo/buffalo-plugins v1.10.0/go.mod h1:4osg8d9s60txLuGwXnqH+RCjPHj9K466cDFRl3PErHI= +github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8= +github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.7.0 h1:GlXgaiBkmrYMHco6t4j7SacKO4XUjvh5pwXh0f4uxXU= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= 
+github.com/gobuffalo/envy v1.6.7/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.8/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.9/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.10/go.mod h1:X0CFllQjTV5ogsnUrg+Oks2yTI+PU2dGYBJOEI2D1Uo= +github.com/gobuffalo/envy v1.6.11/go.mod h1:Fiq52W7nrHGDggFPhn2ZCcHw4u/rqXkqo+i7FB6EAcg= +github.com/gobuffalo/envy v1.6.12 h1:zkhss8DXz/pty2HAyA8BnvWMTYxo4gjd4+WCnYovoxY= +github.com/gobuffalo/envy v1.6.12/go.mod h1:qJNrJhKkZpEW0glh5xP2syQHH5kgdmgsKss2Kk8PTP0= +github.com/gobuffalo/events v1.0.3/go.mod h1:Txo8WmqScapa7zimEQIwgiJBvMECMe9gJjsKNPN3uZw= +github.com/gobuffalo/events v1.0.7/go.mod h1:z8txf6H9jWhQ5Scr7YPLWg/cgXBRj8Q4uYI+rsVCCSQ= +github.com/gobuffalo/events v1.0.8/go.mod h1:A5KyqT1sA+3GJiBE4QKZibse9mtOcI9nw8gGrDdqYGs= +github.com/gobuffalo/events v1.1.3/go.mod h1:9yPGWYv11GENtzrIRApwQRMYSbUgCsZ1w6R503fCfrk= +github.com/gobuffalo/events v1.1.4/go.mod h1:09/YRRgZHEOts5Isov+g9X2xajxdvOAcUuAHIX/O//A= +github.com/gobuffalo/events v1.1.5/go.mod h1:3YUSzgHfYctSjEjLCWbkXP6djH2M+MLaVRzb4ymbAK0= +github.com/gobuffalo/events v1.1.7/go.mod h1:6fGqxH2ing5XMb3EYRq9LEkVlyPGs4oO/eLzh+S8CxY= +github.com/gobuffalo/events v1.1.8/go.mod h1:UFy+W6X6VbCWS8k2iT81HYX65dMtiuVycMy04cplt/8= +github.com/gobuffalo/events v1.1.9/go.mod h1:/0nf8lMtP5TkgNbzYxR6Bl4GzBy5s5TebgNTdRfRbPM= +github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc= +github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181018182602-fd24a256709f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181019110701-3d6f0b585514/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181024204909-8f6be1a8c6c2/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181104133451-1f6e9779237a/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI= +github.com/gobuffalo/flect v0.0.0-20181210151238-24a2b68e0316/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= +github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2 h1:51NF9n6h4nGMooU5ALB+uJCM0UOmcWjAegIZb/ePtoE= +github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= +github.com/gobuffalo/genny v0.0.0-20180924032338-7af3a40f2252/go.mod h1:tUTQOogrr7tAQnhajMSH6rv1BVev34H2sa1xNHMy94g= +github.com/gobuffalo/genny v0.0.0-20181003150629-3786a0744c5d/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= +github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= +github.com/gobuffalo/genny v0.0.0-20181007153042-b8de7d566757/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181012161047-33e5f43d83a6/go.mod 
h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181017160347-90a774534246/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181024195656-51392254bf53/go.mod h1:o9GEH5gn5sCKLVB5rHFC4tq40rQ3VRUzmx6WwmaqISE= +github.com/gobuffalo/genny v0.0.0-20181025145300-af3f81d526b8/go.mod h1:uZ1fFYvdcP8mu0B/Ynarf6dsGvp7QFIpk/QACUuFUVI= +github.com/gobuffalo/genny v0.0.0-20181027191429-94d6cfb5c7fc/go.mod h1:x7SkrQQBx204Y+O9EwRXeszLJDTaWN0GnEasxgLrQTA= +github.com/gobuffalo/genny v0.0.0-20181027195209-3887b7171c4f/go.mod h1:JbKx8HSWICu5zyqWOa0dVV1pbbXOHusrSzQUprW6g+w= +github.com/gobuffalo/genny v0.0.0-20181106193839-7dcb0924caf1/go.mod h1:x61yHxvbDCgQ/7cOAbJCacZQuHgB0KMSzoYcw5debjU= +github.com/gobuffalo/genny v0.0.0-20181107223128-f18346459dbe/go.mod h1:utQD3aKKEsdb03oR+Vi/6ztQb1j7pO10N3OBoowRcSU= +github.com/gobuffalo/genny v0.0.0-20181114215459-0a4decd77f5d/go.mod h1:kN2KZ8VgXF9VIIOj/GM0Eo7YK+un4Q3tTreKOf0q1ng= +github.com/gobuffalo/genny v0.0.0-20181119162812-e8ff4adce8bb/go.mod h1:BA9htSe4bZwBDJLe8CUkoqkypq3hn3+CkoHqVOW718E= +github.com/gobuffalo/genny v0.0.0-20181127225641-2d959acc795b/go.mod h1:l54xLXNkteX/PdZ+HlgPk1qtcrgeOr3XUBBPDbH+7CQ= +github.com/gobuffalo/genny v0.0.0-20181128191930-77e34f71ba2a/go.mod h1:FW/D9p7cEEOqxYA71/hnrkOWm62JZ5ZNxcNIVJEaWBU= +github.com/gobuffalo/genny v0.0.0-20181203165245-fda8bcce96b1/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181203201232-849d2c9534ea/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181206121324-d6fb8a0dbe36/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181207164119-84844398a37d/go.mod h1:y0ysCHGGQf2T3vOhCrGHheYN54Y/REj0ayd0Suf4C/8= +github.com/gobuffalo/genny v0.0.0-20181211165820-e26c8466f14d/go.mod h1:sHnK+ZSU4e2feXP3PA29ouij6PUEiN+RCwECjCTB3yM= +github.com/gobuffalo/genny v0.0.0-20190104222617-a71664fc38e7/go.mod h1:QPsQ1FnhEsiU8f+O0qKWXz2RE4TiDqLVChWkBuh1WaY= +github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5/go.mod h1:CIaHCrSIuJ4il6ka3Hub4DR4adDrGoXGEEt2FbBxoIo= +github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt3/DSalaIXbb0De/dmTqMQdkQ4I= +github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY= +github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI= +github.com/gobuffalo/httptest v1.0.2/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= +github.com/gobuffalo/licenser v0.0.0-20180924033006-eae28e638a42/go.mod h1:Ubo90Np8gpsSZqNScZZkVXXAo5DGhTb+WYFIjlnog8w= +github.com/gobuffalo/licenser v0.0.0-20181025145548-437d89de4f75/go.mod h1:x3lEpYxkRG/XtGCUNkio+6RZ/dlOvLzTI9M1auIwFcw= +github.com/gobuffalo/licenser v0.0.0-20181027200154-58051a75da95/go.mod h1:BzhaaxGd1tq1+OLKObzgdCV9kqVhbTulxOpYbvMQWS0= +github.com/gobuffalo/licenser v0.0.0-20181109171355-91a2a7aac9a7/go.mod h1:m+Ygox92pi9bdg+gVaycvqE8RVSjZp7mWw75+K5NPHk= +github.com/gobuffalo/licenser v0.0.0-20181128165715-cc7305f8abed/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE= +github.com/gobuffalo/licenser v0.0.0-20181203160806-fe900bbede07/go.mod h1:ph6VDNvOzt1CdfaWC+9XwcBnlSTBz2j49PBwum6RFaU= +github.com/gobuffalo/licenser v0.0.0-20181211173111-f8a311c51159/go.mod 
h1:ve/Ue99DRuvnTaLq2zKa6F4KtHiYf7W046tDjuGYPfM= +github.com/gobuffalo/logger v0.0.0-20181022175615-46cfb361fc27/go.mod h1:8sQkgyhWipz1mIctHF4jTxmJh1Vxhp7mP8IqbljgJZo= +github.com/gobuffalo/logger v0.0.0-20181027144941-73d08d2bb969/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181027193913-9cf4dd0efe46/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181109185836-3feeab578c17/go.mod h1:oNErH0xLe+utO+OW8ptXMSA5DkiSEDW1u3zGIt8F9Ew= +github.com/gobuffalo/logger v0.0.0-20181117211126-8e9b89b7c264/go.mod h1:5etB91IE0uBlw9k756fVKZJdS+7M7ejVhmpXXiSFj0I= +github.com/gobuffalo/logger v0.0.0-20181127160119-5b956e21995c/go.mod h1:+HxKANrR9VGw9yN3aOAppJKvhO05ctDi63w4mDnKv2U= +github.com/gobuffalo/makr v1.1.5/go.mod h1:Y+o0btAH1kYAMDJW/TX3+oAXEu0bmSLLoC9mIFxtzOw= +github.com/gobuffalo/mapi v1.0.0/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/meta v0.0.0-20181018155829-df62557efcd3/go.mod h1:XTTOhwMNryif3x9LkTTBO/Llrveezd71u3quLd0u7CM= +github.com/gobuffalo/meta v0.0.0-20181018192820-8c6cef77dab3/go.mod h1:E94EPzx9NERGCY69UWlcj6Hipf2uK/vnfrF4QD0plVE= +github.com/gobuffalo/meta v0.0.0-20181025145500-3a985a084b0a/go.mod h1:YDAKBud2FP7NZdruCSlmTmDOZbVSa6bpK7LJ/A/nlKg= +github.com/gobuffalo/meta v0.0.0-20181114191255-b130ebedd2f7/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE= +github.com/gobuffalo/meta v0.0.0-20181127070345-0d7e59dd540b/go.mod h1:RLO7tMvE0IAKAM8wny1aN12pvEKn7EtkBLkUZR00Qf8= +github.com/gobuffalo/mw-basicauth v1.0.3/go.mod h1:dg7+ilMZOKnQFHDefUzUHufNyTswVUviCBgF244C1+0= +github.com/gobuffalo/mw-contenttype v0.0.0-20180802152300-74f5a47f4d56/go.mod h1:7EvcmzBbeCvFtQm5GqF9ys6QnCxz2UM1x0moiWLq1No= +github.com/gobuffalo/mw-csrf v0.0.0-20180802151833-446ff26e108b/go.mod h1:sbGtb8DmDZuDUQoxjr8hG1ZbLtZboD9xsn6p77ppcHo= +github.com/gobuffalo/mw-forcessl v0.0.0-20180802152810-73921ae7a130/go.mod h1:JvNHRj7bYNAMUr/5XMkZaDcw3jZhUZpsmzhd//FFWmQ= +github.com/gobuffalo/mw-i18n v0.0.0-20180802152014-e3060b7e13d6/go.mod h1:91AQfukc52A6hdfIfkxzyr+kpVYDodgAeT5cjX1UIj4= +github.com/gobuffalo/mw-paramlogger v0.0.0-20181005191442-d6ee392ec72e/go.mod h1:6OJr6VwSzgJMqWMj7TYmRUqzNe2LXu/W1rRW4MAz/ME= +github.com/gobuffalo/mw-tokenauth v0.0.0-20181001105134-8545f626c189/go.mod h1:UqBF00IfKvd39ni5+yI5MLMjAf4gX7cDKN/26zDOD6c= +github.com/gobuffalo/packd v0.0.0-20181027182251-01ad393492c8/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027190505-aafc0d02c411/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027194105-7ae579e6d213/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181031195726-c82734870264/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181104210303-d376b15f8e96/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181124090624-311c6248e5fb/go.mod h1:Foenia9ZvITEvG05ab6XpiD5EfBHPL8A6hush8SJ0o8= 
+github.com/gobuffalo/packd v0.0.0-20181207120301-c49825f8f6f4/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= +github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= +github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24= +github.com/gobuffalo/packr v1.15.0/go.mod h1:t5gXzEhIviQwVlNx/+3SfS07GS+cZ2hn76WLzPp6MGI= +github.com/gobuffalo/packr v1.15.1/go.mod h1:IeqicJ7jm8182yrVmNbM6PR4g79SjN9tZLH8KduZZwE= +github.com/gobuffalo/packr v1.19.0/go.mod h1:MstrNkfCQhd5o+Ct4IJ0skWlxN8emOq8DsoT1G98VIU= +github.com/gobuffalo/packr v1.20.0/go.mod h1:JDytk1t2gP+my1ig7iI4NcVaXr886+N0ecUga6884zw= +github.com/gobuffalo/packr v1.21.0/go.mod h1:H00jGfj1qFKxscFJSw8wcL4hpQtPe1PfU2wa6sg/SR0= +github.com/gobuffalo/packr/v2 v2.0.0-rc.8/go.mod h1:y60QCdzwuMwO2R49fdQhsjCPv7tLQFR0ayzxxla9zes= +github.com/gobuffalo/packr/v2 v2.0.0-rc.9/go.mod h1:fQqADRfZpEsgkc7c/K7aMew3n4aF1Kji7+lIZeR98Fc= +github.com/gobuffalo/packr/v2 v2.0.0-rc.10/go.mod h1:4CWWn4I5T3v4c1OsJ55HbHlUEKNWMITG5iIkdr4Px4w= +github.com/gobuffalo/packr/v2 v2.0.0-rc.11/go.mod h1:JoieH/3h3U4UmatmV93QmqyPUdf4wVM9HELaHEu+3fk= +github.com/gobuffalo/packr/v2 v2.0.0-rc.12/go.mod h1:FV1zZTsVFi1DSCboO36Xgs4pzCZBjB/tDV9Cz/lSaR8= +github.com/gobuffalo/packr/v2 v2.0.0-rc.13/go.mod h1:2Mp7GhBFMdJlOK8vGfl7SYtfMP3+5roE39ejlfjw0rA= +github.com/gobuffalo/packr/v2 v2.0.0-rc.14/go.mod h1:06otbrNvDKO1eNQ3b8hst+1010UooI2MFg+B2Ze4MV8= +github.com/gobuffalo/plush v3.7.16+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.20+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.21+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.22+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.23+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.30+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.31+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.32+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plushgen v0.0.0-20181128164830-d29dcb966cb2/go.mod h1:r9QwptTFnuvSaSRjpSp4S2/4e2D3tJhARYbvEBcKSb4= +github.com/gobuffalo/plushgen v0.0.0-20181203163832-9fc4964505c2/go.mod h1:opEdT33AA2HdrIwK1aibqnTJDVVKXC02Bar/GT1YRVs= +github.com/gobuffalo/plushgen v0.0.0-20181207152837-eedb135bd51b/go.mod h1:Lcw7HQbEVm09sAQrCLzIxuhFbB3nAgp4c55E+UlynR0= +github.com/gobuffalo/plushgen v0.0.0-20190104222512-177cd2b872b3/go.mod h1:tYxCozi8X62bpZyKXYHw1ncx2ZtT2nFvG42kuLwYjoc= +github.com/gobuffalo/pop v4.8.2+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/pop v4.8.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/pop v4.8.4+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/release v1.0.35/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= +github.com/gobuffalo/release v1.0.38/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= +github.com/gobuffalo/release v1.0.42/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= 
+github.com/gobuffalo/release v1.0.52/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= +github.com/gobuffalo/release v1.0.53/go.mod h1:FdF257nd8rqhNaqtDWFGhxdJ/Ig4J7VcS3KL7n/a+aA= +github.com/gobuffalo/release v1.0.54/go.mod h1:Pe5/RxRa/BE8whDpGfRqSI7D1a0evGK1T4JDm339tJc= +github.com/gobuffalo/release v1.0.61/go.mod h1:mfIO38ujUNVDlBziIYqXquYfBF+8FDHUjKZgYC1Hj24= +github.com/gobuffalo/release v1.0.72/go.mod h1:NP5NXgg/IX3M5XmHmWR99D687/3Dt9qZtTK/Lbwc1hU= +github.com/gobuffalo/release v1.1.1/go.mod h1:Sluak1Xd6kcp6snkluR1jeXAogdJZpFFRzTYRs/2uwg= +github.com/gobuffalo/release v1.1.3/go.mod h1:CuXc5/m+4zuq8idoDt1l4va0AXAn/OSs08uHOfMVr8E= +github.com/gobuffalo/release v1.1.6/go.mod h1:18naWa3kBsqO0cItXZNJuefCKOENpbbUIqRL1g+p6z0= +github.com/gobuffalo/shoulders v1.0.1/go.mod h1:V33CcVmaQ4gRUmHKwq1fiTXuf8Gp/qjQBUL5tHPmvbA= +github.com/gobuffalo/syncx v0.0.0-20181120191700-98333ab04150/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobuffalo/syncx v0.0.0-20181120194010-558ac7de985f/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobuffalo/tags v2.0.11+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/tags v2.0.14+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/tags v2.0.15+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/uuid v2.0.3+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/uuid v2.0.4+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/uuid v2.0.5+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/validate v2.0.3+incompatible/go.mod h1:N+EtDe0J8252BgfzQUChBgfd6L93m9weay53EWFVsMM= +github.com/gobuffalo/x v0.0.0-20181003152136-452098b06085/go.mod h1:WevpGD+5YOreDJznWevcn8NTmQEW5STSBgIkpkjzqXc= +github.com/gobuffalo/x v0.0.0-20181007152206-913e47c59ca7/go.mod h1:9rDPXaB3kXdKWzMc4odGQQdG2e2DIEmANy5aSJ9yesY= +github.com/gofrs/uuid v3.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff h1:kOkM9whyQYodu09SJ6W3NCsHG7crFaJILQ22Gozp3lg= +github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -113,25 +267,30 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= -github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.0.0-20190212181753-892256c46858/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.0.0-20190221164956-3f3cc5a566b2/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.3.0 
h1:6sjpKIpVwRIIwmcEGp+WwNovNsem+c+2vm6oxshRpL8= github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03 h1:QQqgDN0yxFHrhPV1BWFGP6hEZ82s18CGzu/bLQKT8RY= github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.1.2/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= +github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM= +github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -139,269 +298,383 @@ github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= 
+github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= +github.com/joho/godotenv v1.2.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.7.8/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/markbates/deplist v1.0.4/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= +github.com/markbates/deplist v1.0.5/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= +github.com/markbates/going v1.0.2/go.mod h1:UWCk3zm0UKefHZ7l8BNqi26UyiEMniznk8naLdTcy6c= +github.com/markbates/grift v1.0.4/go.mod h1:wbmtW74veyx+cgfwFhlnnMWqhoz55rnHR47oMXzsyVs= +github.com/markbates/hmax v1.0.0/go.mod h1:cOkR9dktiESxIMu+65oc/r/bdY4bE8zZw3OLhLx0X2c= +github.com/markbates/inflect v1.0.0/go.mod h1:oTeZL2KHA7CUX6X+fovmK9OvIOFuqu0TwdQrZjLTh88= +github.com/markbates/inflect v1.0.1/go.mod h1:uv3UVNBe5qBIfCm8O8Q+DW+S1EopeyINj+Ikhc7rnCk= +github.com/markbates/inflect v1.0.3/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= github.com/markbates/inflect v1.0.4 
h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= +github.com/markbates/oncer v0.0.0-20180924031910-e862a676800b/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20180924034138-723ad0170a46/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/refresh v1.4.10/go.mod h1:NDPHvotuZmTmesXxr95C9bjlw1/0frJwtME2dzcVKhc= +github.com/markbates/safe v1.0.0/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/markbates/sigtx v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc= +github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/monoculum/formam v0.0.0-20180901015400-4e68be1d79ba/go.mod h1:RKgILGEJq24YyJ2ban8EO0RUVSJlF1pGsEvoLEACr/Q= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= 
+github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.4 h1:Y8E/JaaPbmFSW2V81Ab/d8yZFYQQGbni1b1jPcG9Y6A= -github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= 
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.1.0 h1:IxU7wGikQPAcoOd3/f4Ol7+vIKS1Sgu08tzjktR4nJE= +github.com/prometheus/common v0.1.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.0.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.1.0 h1:g0fH8RicVgNl+zVZDCDfbdWxAWoAEJyI7I3TZYXFiig= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod 
h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20170515013102-78fb10f4a5f8/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20180602230221-c42b0e3b24d9/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.1.0/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= +github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero 
v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.2.1/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI= +github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb h1:lyL3z7vYwTWXf4/bI+A01+cCSnfhKIBhy+SQ46Z/ml8= github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= 
+go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI= +go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go4.org v0.0.0-20190313082347-94abd6928b1d h1:JkRdGP3zvTtTbabWSAC6n67ka30y7gOzWAah4XYJSfw= -go4.org v0.0.0-20190313082347-94abd6928b1d/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org v0.0.0-20180809161055-417644f6feb5 h1:+hE86LblG4AyDgwMCLTE6FOlM9+qjHSYS+rKqxUVdsM= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181024171144-74cb1d3d52f4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= 
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181017193950-04a2e542c03f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c h1:pcBdqVcrlT+A3i+tWsOROFONQyey9tisIQHI4xqVGLg= +golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180921163948-d47a0f339242/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180927150500-dad3d9fb7b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181005133103-4497e2df6f9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181011152604-fa43e7bc11ba/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181022134430-8a28ead16f52/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181024145615-5cd93ef61a7c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026064943-731415f00dce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools 
v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.6.0 h1:2tJEkRfnZL5g1GeBUlITh/rqT5HG3sFcoVCUUxmgJ2g= -google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= +golang.org/x/tools v0.0.0-20181003024731-2f84ea8ef872/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181013182035-5e66757b835f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181017214349-06f26fdaaa28/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181024171208-a2dc47679d30/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181026183834-f60e5f99f081/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181107215632-34b416bd17b3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181114190951-94339b83286c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181119130350-139d099f6620/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127195227-b4e97c0ed882/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181203210056-e5f3ab76ea4b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181205224935-3576414c54a4/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181206194817-bcd4e47d0288/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181207183836-8bc39b988060/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181212172921-837e80568c09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190102213336-ca9055ed7d04/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190104182027-498d95493402/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190111214448-fc1d57b08d7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b h1:QhDb4isMLF+1hiAgAqj1YPRPQh+eAqwfG6wJzp57Iuo= +golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api 
v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 h1:wuGevabY6r+ivPNagjUXGGxF+GqgMd+dBhjsxW4q9u4= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190122154452-ba6ebe99b011/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 h1:SdCO7As+ChE1iV3IjBleIIWlj8VjZWuYEUF5pjELOOQ= +google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA= +google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.0.0-20190222213804-5cb15d344471 h1:MzQGt8qWQCR+39kbYRd0uQqsvSidpYqJLFeWiJ9l4OE= k8s.io/api v0.0.0-20190222213804-5cb15d344471/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 h1:JfFtjaElBIgYKCWEtYQkcNrTpW+lMO4GJy8NP6SVQmM= k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 h1:UYfHH+KEF88OTg+GojQUwFTNxbxwmoktLwutUzR0GPg= k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 h1:aE8wOCKuoRs2aU0OP/Rz8SXiAB0FTTku3VtGhhrkSmc= -k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/cluster-bootstrap v0.0.0-20190228181738-e96ff33745e4 h1:C6Q0O7LHTsykhy2KMOfov/wJGeGOLOEMmVSn5bOxfxI= -k8s.io/cluster-bootstrap v0.0.0-20190228181738-e96ff33745e4/go.mod h1:iBSm2nwo3OaiuW8VDvc3ySDXK5SKfUrxwPvBloKG7zg= +k8s.io/apiserver v0.0.0-20190122042701-b6aa1175dafa/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= +k8s.io/client-go v10.0.0+incompatible h1:F1IqCqw7oMBzDkqlcBymRq1450wD0eNqLE9jzUrIi34= 
+k8s.io/client-go v10.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/cluster-bootstrap v0.0.0-20190814110523-aeed25068f87 h1:ClhfUBE7Q3tl31AdmaxSUP/o6UeYO9K3i4vWGiQPPG0= +k8s.io/cluster-bootstrap v0.0.0-20190814110523-aeed25068f87/go.mod h1:CD9HJ5mZ7v2e4GaFDhtg+bLIFqkVrnIX7xYkZOS6Q6I= k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b h1:KH0fUlgdFZH8UMxJ/FDCYHpczfSQKefetq5NjL6BVF0= k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= -k8s.io/component-base v0.0.0-20190620085130-185d68e6e6ea h1:CAN9Jo6bb5+mp3s1/1w8TbaWIxYK1WGQwwYvlacSet4= -k8s.io/component-base v0.0.0-20190620085130-185d68e6e6ea/go.mod h1:VLedAFwENz2swOjm0zmUXpAP2mV55c49xgaOzPBI/QQ= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a h1:QoHVuRquf80YZ+/bovwxoMO3Q/A3nt3yTgS0/0nejuk= -k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d h1:rnHKkkKa8rR4DkemPeIzuL+/Ks2zZj6d3OV/HYD0t/E= +k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d/go.mod h1:lD66vYoxfIYwZTZunUYSg/e6fEj0fcgked2esAmsIfA= +k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= +k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c= -k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 h1:5sW+fEHvlJI3Ngolx30CmubFulwH28DhKjGf70Xmtco= -k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= +k8s.io/klog v0.3.2/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kubernetes v1.13.4 h1:gQqFv/pH8hlbznLXQUsi8s5zqYnv0slmUDl/yVA0EWc= k8s.io/kubernetes v1.13.4/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= -k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/cluster-api v0.1.4 h1:QKY3hJMFTKP8N7sZIELR8jVFw9yGm8eeZMnj/1E36u4= -sigs.k8s.io/cluster-api v0.1.4/go.mod h1:aEXstFx3krpj6/AOcV/rOP2VLKdrtSNUMDR2Kmt6YSs= +k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a h1:2jUDc9gJja832Ftp+QbDV0tVhQHMISFn01els+2ZAcw= +k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/cluster-api 
v0.1.9 h1:pSkEW8Ws46jtHM1KUsFFhf0phY36hljxd5O75EXy9wI= +sigs.k8s.io/cluster-api v0.1.9/go.mod h1:MTxMmx3MSFZNi0RAjj64X+RurP9EEdhrRwLzdFZKTYw= sigs.k8s.io/controller-runtime v0.1.12 h1:ovDq28E64PeY1yR+6H7DthakIC09soiDCrKvfP2tPYo= sigs.k8s.io/controller-runtime v0.1.12/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8= sigs.k8s.io/controller-tools v0.1.11 h1:1ln8Ipmg2xosbeHmnlzG53kwO1f3yf9l6auvCHdbZpI= sigs.k8s.io/controller-tools v0.1.11/go.mod h1:6g08p9m9G/So3sBc1AOQifHfhxH/mb6Sc4z0LMI8XMw= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/controller-tools v0.1.12 h1:LW8Tfywz+epjYiySSOYWFQl1O1y0os+ZWf22XJmsFww= +sigs.k8s.io/controller-tools v0.1.12/go.mod h1:6g08p9m9G/So3sBc1AOQifHfhxH/mb6Sc4z0LMI8XMw= sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS new file mode 100644 index 0000000000..c364af1da0 --- /dev/null +++ b/vendor/cloud.google.com/go/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of cloud authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Filippo Valsorda +Google Inc. +Ingo Oeser +Palm Stone Games, Inc. +Paweł Knap +Péter Szilágyi +Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS new file mode 100644 index 0000000000..3b3cbed98e --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -0,0 +1,40 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. + +Alexis Hunt +Andreas Litt +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Sansome +David Symonds +Filippo Valsorda +Glenn Lewis +Ingo Oeser +James Hall +Johan Euphrosine +Jonathan Amsterdam +Kunpei Sakai +Luna Duclos +Magnus Hiie +Mario Castro +Michael McGreevy +Omar Jarjur +Paweł Knap +Péter Szilágyi +Sarah Adams +Thanatat Tamtan +Toby Burress +Tuo Shan +Tyler Treat diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml index ee417bbe6b..192981b7f6 100644 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.11.x + - 1.10.x go_import_path: contrib.go.opencensus.io/exporter/ocagent @@ -13,6 +13,5 @@ script: - go build ./... 
# Ensure dependency updates don't break build - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi - go vet ./... - - GO111MODULE=on go test -v -race $PKGS # Run all the tests with the race detector enabled - - GO111MODULE=off go test -v -race $PKGS # Make sure tests still pass when not using Go modules. + - go test -v -race $PKGS # Run all the tests with the race detector enabled - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md index 3b9e908f59..64550ee609 100644 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md @@ -14,7 +14,7 @@ Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core l ## Installation ```bash -$ go get -u contrib.go.opencensus.io/exporter/ocagent +$ go get -u contrib.go.opencensus.io/exporter/ocagent/v1 ``` ## Usage @@ -26,7 +26,7 @@ import ( "log" "time" - "contrib.go.opencensus.io/exporter/ocagent" + "contrib.go.opencensus.io/exporter/ocagent/v1" "go.opencensus.io/trace" ) diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/connection.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/connection.go deleted file mode 100644 index 3a9beda436..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/connection.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -import ( - "math/rand" - "sync/atomic" - "time" -) - -const ( - sDisconnected int32 = 5 + iota - sConnected -) - -func (ae *Exporter) setStateDisconnected() { - atomic.StoreInt32(&ae.connectionState, sDisconnected) - select { - case ae.disconnectedCh <- true: - default: - } -} - -func (ae *Exporter) setStateConnected() { - atomic.StoreInt32(&ae.connectionState, sConnected) -} - -func (ae *Exporter) connected() bool { - return atomic.LoadInt32(&ae.connectionState) == sConnected -} - -const defaultConnReattemptPeriod = 10 * time.Second - -func (ae *Exporter) indefiniteBackgroundConnection() error { - defer func() { - ae.backgroundConnectionDoneCh <- true - }() - - connReattemptPeriod := ae.reconnectionPeriod - if connReattemptPeriod <= 0 { - connReattemptPeriod = defaultConnReattemptPeriod - } - - // No strong seeding required, nano time can - // already help with pseudo uniqueness. - rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024))) - - // maxJitter: 1 + (70% of the connectionReattemptPeriod) - maxJitter := int64(1 + 0.7*float64(connReattemptPeriod)) - - for { - // Otherwise these will be the normal scenarios to enable - // reconnections if we trip out. - // 1. If we've stopped, return entirely - // 2. 
Otherwise block until we are disconnected, and - // then retry connecting - select { - case <-ae.stopCh: - return errStopped - - case <-ae.disconnectedCh: - // Normal scenario that we'll wait for - } - - if err := ae.connect(); err == nil { - ae.setStateConnected() - } else { - ae.setStateDisconnected() - } - - // Apply some jitter to avoid lockstep retrials of other - // agent-exporters. Lockstep retrials could result in an - // innocent DDOS, by clogging the machine's resources and network. - jitter := time.Duration(rng.Int63n(maxJitter)) - <-time.After(connReattemptPeriod + jitter) - } -} - -func (ae *Exporter) connect() error { - cc, err := ae.dialToAgent() - if err != nil { - return err - } - return ae.enableConnectionStreams(cc) -} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/go.mod b/vendor/contrib.go.opencensus.io/exporter/ocagent/go.mod index 72a9068e41..410687e9a9 100644 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/go.mod +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/go.mod @@ -1,10 +1,9 @@ module contrib.go.opencensus.io/exporter/ocagent require ( - github.com/census-instrumentation/opencensus-proto v0.2.0 // this is to match the version used in census-instrumentation/opencensus-service - github.com/golang/protobuf v1.3.1 - github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect - go.opencensus.io v0.20.2 - google.golang.org/api v0.3.1 - google.golang.org/grpc v1.19.1 + github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a + github.com/golang/protobuf v1.2.0 + go.opencensus.io v0.17.0 + google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf + google.golang.org/grpc v1.15.0 ) diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/go.sum b/vendor/contrib.go.opencensus.io/exporter/ocagent/go.sum index 52c362aaa9..8bf9888c25 100644 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/go.sum +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/go.sum @@ -1,130 +1,44 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.0.1 h1:4v5I+ax5jCmwTYVaWQacX8ZSxvUZemBX4UwBGSkDeoA= +github.com/census-instrumentation/opencensus-proto v0.0.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
+github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a h1:t88pXOTS5K+pjfuhTOcul6sdC4khgqB8ukyfbe62Zxo= +github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +go.opencensus.io v0.17.0 h1:2Cu88MYg+1LU+WVD+NWwYhyP0kKgRlN9QjWGaX0jKTE= +go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a 
h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.15.0 h1:Az/KuahOM4NAidTEuJCv/RonAA7rYsTPkqXVjr+8OOw= +google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/nodeinfo.go 
b/vendor/contrib.go.opencensus.io/exporter/ocagent/nodeinfo.go index dd41c349a2..5b9a058f39 100644 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/nodeinfo.go +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/nodeinfo.go @@ -21,12 +21,7 @@ import ( "go.opencensus.io" ) -// NodeWithStartTime creates a node using nodeName and derives: -// Hostname from the environment -// Pid from the current process -// StartTimestamp from the start time of this process -// Language and library information. -func NodeWithStartTime(nodeName string) *commonpb.Node { +func createNodeInfo(nodeName string) *commonpb.Node { return &commonpb.Node{ Identifier: &commonpb.ProcessIdentifier{ HostName: os.Getenv("HOSTNAME"), diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go index 510da99ea5..ab4ba3c183 100644 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go @@ -23,19 +23,11 @@ import ( "google.golang.org/api/support/bundler" "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" - "go.opencensus.io/plugin/ocgrpc" - "go.opencensus.io/resource" - "go.opencensus.io/stats/view" "go.opencensus.io/trace" - commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" + agentcommonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1" - metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" - resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" ) @@ -49,43 +41,22 @@ func init() { } var _ trace.Exporter = (*Exporter)(nil) -var _ view.Exporter = (*Exporter)(nil) type Exporter struct { - connectionState int32 - // mu protects the non-atomic and non-channel variables - mu sync.RWMutex - // senderMu protects the concurrent unsafe traceExporter client - senderMu sync.RWMutex - started bool - stopped bool - agentAddress string - serviceName string - canDialInsecure bool - traceExporter agenttracepb.TraceService_ExportClient - metricsExporter agentmetricspb.MetricsService_ExportClient - nodeInfo *commonpb.Node - grpcClientConn *grpc.ClientConn - reconnectionPeriod time.Duration - resource *resourcepb.Resource - compressor string - headers map[string]string - - startOnce sync.Once - stopCh chan bool - disconnectedCh chan bool - - backgroundConnectionDoneCh chan bool + mu sync.RWMutex + started bool + stopped bool + agentPort uint16 + agentAddress string + serviceName string + canDialInsecure bool + traceSvcClient agenttracepb.TraceServiceClient + traceExporter agenttracepb.TraceService_ExportClient + nodeInfo *agentcommonpb.Node + grpcClientConn *grpc.ClientConn traceBundler *bundler.Bundler - - // viewDataBundler is the bundler to enable conversion - // from OpenCensus-Go view.Data to metricspb.Metric. - // Please do not confuse it with metricsBundler! 
- viewDataBundler *bundler.Bundler - - clientTransportCredentials credentials.TransportCredentials } func NewExporter(opts ...ExporterOption) (*Exporter, error) { @@ -106,22 +77,16 @@ func NewUnstartedExporter(opts ...ExporterOption) (*Exporter, error) { for _, opt := range opts { opt.withExporter(e) } + if e.agentPort <= 0 { + e.agentPort = DefaultAgentPort + } traceBundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) { e.uploadTraces(bundle.([]*trace.SpanData)) }) traceBundler.DelayThreshold = 2 * time.Second traceBundler.BundleCountThreshold = spanDataBufferSize e.traceBundler = traceBundler - - viewDataBundler := bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) { - e.uploadViewData(bundle.([]*view.Data)) - }) - viewDataBundler.DelayThreshold = 2 * time.Second - viewDataBundler.BundleCountThreshold = 500 // TODO: (@odeke-em) make this configurable. - e.viewDataBundler = viewDataBundler - e.nodeInfo = NodeWithStartTime(e.serviceName) - e.resource = resourceProtoFromEnv() - + e.nodeInfo = createNodeInfo(e.serviceName) return e, nil } @@ -130,34 +95,26 @@ const ( maxInitialTracesRetries = 10 ) -var ( - errAlreadyStarted = errors.New("already started") - errNotStarted = errors.New("not started") - errStopped = errors.New("stopped") - errNoConnection = errors.New("no active connection") -) - // Start dials to the agent, establishing a connection to it. It also // initiates the Config and Trace services by sending over the initial -// messages that consist of the node identifier. Start invokes a background -// connector that will reattempt connections to the agent periodically -// if the connection dies. +// messages that consist of the node identifier. Start performs a best case +// attempt to try to send the initial messages, by applying exponential +// backoff at most 10 times. func (ae *Exporter) Start() error { - var err = errAlreadyStarted - ae.startOnce.Do(func() { - ae.mu.Lock() - defer ae.mu.Unlock() + ae.mu.Lock() + defer ae.mu.Unlock() + err := ae.doStartLocked() + if err == nil { ae.started = true - ae.disconnectedCh = make(chan bool, 1) - ae.stopCh = make(chan bool) - ae.backgroundConnectionDoneCh = make(chan bool) - - ae.setStateDisconnected() - go ae.indefiniteBackgroundConnection() + return nil + } - err = nil - }) + // Otherwise we have an error and should clean up to avoid leaking resources. 
+ ae.started = false + if ae.grpcClientConn != nil { + ae.grpcClientConn.Close() + } return err } @@ -166,65 +123,51 @@ func (ae *Exporter) prepareAgentAddress() string { if ae.agentAddress != "" { return ae.agentAddress } - return fmt.Sprintf("%s:%d", DefaultAgentHost, DefaultAgentPort) -} - -func (ae *Exporter) enableConnectionStreams(cc *grpc.ClientConn) error { - ae.mu.RLock() - started := ae.started - nodeInfo := ae.nodeInfo - ae.mu.RUnlock() - - if !started { - return errNotStarted + port := DefaultAgentPort + if ae.agentPort > 0 { + port = ae.agentPort } + return fmt.Sprintf("%s:%d", DefaultAgentHost, port) +} - ae.mu.Lock() - // If the previous clientConn was non-nil, close it - if ae.grpcClientConn != nil { - _ = ae.grpcClientConn.Close() +func (ae *Exporter) doStartLocked() error { + if ae.started { + return nil } - ae.grpcClientConn = cc - ae.mu.Unlock() - if err := ae.createTraceServiceConnection(ae.grpcClientConn, nodeInfo); err != nil { + // Now start it + cc, err := ae.dialToAgent() + if err != nil { return err } + ae.grpcClientConn = cc - return ae.createMetricsServiceConnection(ae.grpcClientConn, nodeInfo) -} - -func (ae *Exporter) createTraceServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error { // Initiate the trace service by sending over node identifier info. traceSvcClient := agenttracepb.NewTraceServiceClient(cc) - ctx := context.Background() - if len(ae.headers) > 0 { - ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) - } - traceExporter, err := traceSvcClient.Export(ctx) + traceExporter, err := traceSvcClient.Export(context.Background()) if err != nil { return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err) } - firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{ - Node: node, - Resource: ae.resource, - } - if err := traceExporter.Send(firstTraceMessage); err != nil { + firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{Node: ae.nodeInfo} + err = nTriesWithExponentialBackoff(maxInitialTracesRetries, 200*time.Microsecond, func() error { + return traceExporter.Send(firstTraceMessage) + }) + if err != nil { return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) } - - ae.mu.Lock() ae.traceExporter = traceExporter - ae.mu.Unlock() // Initiate the config service by sending over node identifier info. configStream, err := traceSvcClient.Config(context.Background()) if err != nil { return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err) } - firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node} - if err := configStream.Send(firstCfgMessage); err != nil { + firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: ae.nodeInfo} + err = nTriesWithExponentialBackoff(maxInitialConfigRetries, 200*time.Microsecond, func() error { + return configStream.Send(firstCfgMessage) + }) + if err != nil { return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) } @@ -235,52 +178,32 @@ func (ae *Exporter) createTraceServiceConnection(cc *grpc.ClientConn, node *comm return nil } -func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error { - metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc) - metricsExporter, err := metricsSvcClient.Export(context.Background()) - if err != nil { - return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err) - } - // Initiate the metrics service by sending over the first message just containing the Node and Resource. 
- firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{ - Node: node, - Resource: ae.resource, - } - if err := metricsExporter.Send(firstMetricsMessage); err != nil { - return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err) - } - - ae.mu.Lock() - ae.metricsExporter = metricsExporter - ae.mu.Unlock() - - // With that we are good to go and can start sending metrics - return nil -} - +// dialToAgent performs a best case attempt to dial to the agent. +// It retries failed dials with: +// * gRPC dialTimeout of 1s +// * exponential backoff, 5 times with a period of 50ms +// hence in the worst case of (no agent actually available), it +// will take at least: +// (5 * 1s) + ((1<<5)-1) * 0.01 s = 5s + 1.55s = 6.55s func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) { addr := ae.prepareAgentAddress() - var dialOpts []grpc.DialOption - if ae.clientTransportCredentials != nil { - dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials)) - } else if ae.canDialInsecure { + dialOpts := []grpc.DialOption{grpc.WithBlock()} + if ae.canDialInsecure { dialOpts = append(dialOpts, grpc.WithInsecure()) } - if ae.compressor != "" { - dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor))) - } - dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) - ctx := context.Background() - if len(ae.headers) > 0 { - ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) - } - return grpc.DialContext(ctx, addr, dialOpts...) + var cc *grpc.ClientConn + dialOpts = append(dialOpts, grpc.WithTimeout(1*time.Second)) + dialBackoffWaitPeriod := 50 * time.Millisecond + err := nTriesWithExponentialBackoff(5, dialBackoffWaitPeriod, func() error { + var err error + cc, err = grpc.Dial(addr, dialOpts...) + return err + }) + return cc, err } func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error { - // Note: We haven't yet implemented configuration sending so we - // should NOT be changing connection states within this function for now. for { recv, err := configStream.Recv() if err != nil { @@ -296,7 +219,7 @@ func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService if psamp := cfg.GetProbabilitySampler(); psamp != nil { trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)}) } else if csamp := cfg.GetConstantSampler(); csamp != nil { - alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON + alwaysSample := csamp.Decision == true if alwaysSample { trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) } else { @@ -313,19 +236,20 @@ func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService } } +var ( + errNotStarted = errors.New("not started") +) + // Stop shuts down all the connections and resources // related to the exporter. func (ae *Exporter) Stop() error { - ae.mu.RLock() - cc := ae.grpcClientConn - started := ae.started - stopped := ae.stopped - ae.mu.RUnlock() + ae.mu.Lock() + defer ae.mu.Unlock() - if !started { + if !ae.started { return errNotStarted } - if stopped { + if ae.stopped { // TODO: tell the user that we've already stopped, so perhaps a sentinel error? return nil } @@ -334,19 +258,13 @@ func (ae *Exporter) Stop() error { // Now close the underlying gRPC connection. 
var err error - if cc != nil { - err = cc.Close() + if ae.grpcClientConn != nil { + err = ae.grpcClientConn.Close() } // At this point we can change the state variables: started and stopped - ae.mu.Lock() ae.started = false ae.stopped = true - ae.mu.Unlock() - close(ae.stopCh) - - // Ensure that the backgroundConnector returns - <-ae.backgroundConnectionDoneCh return err } @@ -355,44 +273,12 @@ func (ae *Exporter) ExportSpan(sd *trace.SpanData) { if sd == nil { return } - _ = ae.traceBundler.Add(sd, 1) -} - -func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error { - if batch == nil || len(batch.Spans) == 0 { - return nil - } - - select { - case <-ae.stopCh: - return errStopped - - default: - if !ae.connected() { - return errNoConnection - } - - ae.senderMu.Lock() - err := ae.traceExporter.Send(batch) - ae.senderMu.Unlock() - if err != nil { - ae.setStateDisconnected() - return err - } - return nil - } + _ = ae.traceBundler.Add(sd, -1) } -func (ae *Exporter) ExportView(vd *view.Data) { - if vd == nil { - return - } - _ = ae.viewDataBundler.Add(vd, 1) -} - -func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span { +func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) { if len(sdl) == 0 { - return nil + return } protoSpans := make([]*tracepb.Span, 0, len(sdl)) for _, sd := range sdl { @@ -400,97 +286,14 @@ func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span { protoSpans = append(protoSpans, ocSpanToProtoSpan(sd)) } } - return protoSpans -} - -func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) { - select { - case <-ae.stopCh: - return - default: - if !ae.connected() { - return - } - - protoSpans := ocSpanDataToPbSpans(sdl) - if len(protoSpans) == 0 { - return - } - ae.senderMu.Lock() - err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{ + if len(protoSpans) > 0 { + _ = ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{ Spans: protoSpans, }) - ae.senderMu.Unlock() - if err != nil { - ae.setStateDisconnected() - } - } -} - -func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric { - if len(vdl) == 0 { - return nil - } - metrics := make([]*metricspb.Metric, 0, len(vdl)) - for _, vd := range vdl { - if vd != nil { - vmetric, err := viewDataToMetric(vd) - // TODO: (@odeke-em) somehow report this error, if it is non-nil. - if err == nil && vmetric != nil { - metrics = append(metrics, vmetric) - } - } - } - return metrics -} - -func (ae *Exporter) uploadViewData(vdl []*view.Data) { - select { - case <-ae.stopCh: - return - - default: - if !ae.connected() { - return - } - - protoMetrics := ocViewDataToPbMetrics(vdl) - if len(protoMetrics) == 0 { - return - } - err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{ - Metrics: protoMetrics, - // TODO:(@odeke-em) - // a) Figure out how to derive a Node from the environment - // b) Figure out how to derive a Resource from the environment - // or better letting users of the exporter configure it. 
- }) - if err != nil { - ae.setStateDisconnected() - } } } func (ae *Exporter) Flush() { ae.traceBundler.Flush() - ae.viewDataBundler.Flush() -} - -func resourceProtoFromEnv() *resourcepb.Resource { - rs, _ := resource.FromEnv(context.Background()) - if rs == nil { - return nil - } - - rprs := &resourcepb.Resource{ - Type: rs.Type, - } - if rs.Labels != nil { - rprs.Labels = make(map[string]string) - for k, v := range rs.Labels { - rprs.Labels[k] = v - } - } - return rprs } diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go index 3e05ae8b30..751f311f1f 100644 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go @@ -14,12 +14,6 @@ package ocagent -import ( - "time" - - "google.golang.org/grpc/credentials" -) - const ( DefaultAgentPort uint16 = 55678 DefaultAgentHost string = "localhost" @@ -29,6 +23,14 @@ type ExporterOption interface { withExporter(e *Exporter) } +type portSetter uint16 + +func (ps portSetter) withExporter(e *Exporter) { + e.agentPort = uint16(ps) +} + +var _ ExporterOption = (*portSetter)(nil) + type insecureGrpcConnection int var _ ExporterOption = (*insecureGrpcConnection)(nil) @@ -42,6 +44,12 @@ func (igc *insecureGrpcConnection) withExporter(e *Exporter) { // does. Note, by default, client security is required unless WithInsecure is used. func WithInsecure() ExporterOption { return new(insecureGrpcConnection) } +// WithPort allows one to override the port that the exporter will +// connect to the agent on, instead of using DefaultAgentPort. +func WithPort(port uint16) ExporterOption { + return portSetter(port) +} + type addressSetter string func (as addressSetter) withExporter(e *Exporter) { @@ -70,59 +78,3 @@ var _ ExporterOption = (*serviceNameSetter)(nil) func WithServiceName(serviceName string) ExporterOption { return serviceNameSetter(serviceName) } - -type reconnectionPeriod time.Duration - -func (rp reconnectionPeriod) withExporter(e *Exporter) { - e.reconnectionPeriod = time.Duration(rp) -} - -func WithReconnectionPeriod(rp time.Duration) ExporterOption { - return reconnectionPeriod(rp) -} - -type compressorSetter string - -func (c compressorSetter) withExporter(e *Exporter) { - e.compressor = string(c) -} - -// UseCompressor will set the compressor for the gRPC client to use when sending requests. -// It is the responsibility of the caller to ensure that the compressor set has been registered -// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some -// compressors auto-register on import, such as gzip, which can be registered by calling -// `import _ "google.golang.org/grpc/encoding/gzip"` -func UseCompressor(compressorName string) ExporterOption { - return compressorSetter(compressorName) -} - -type headerSetter map[string]string - -func (h headerSetter) withExporter(e *Exporter) { - e.headers = map[string]string(h) -} - -// WithHeaders will send the provided headers when the gRPC stream connection -// is instantiated -func WithHeaders(headers map[string]string) ExporterOption { - return headerSetter(headers) -} - -type clientCredentials struct { - credentials.TransportCredentials -} - -var _ ExporterOption = (*clientCredentials)(nil) - -// WithTLSCredentials allows the connection to use TLS credentials -// when talking to the server. 
It takes in grpc.TransportCredentials instead -// of say a Certificate file or a tls.Certificate, because the retrieving -// these credentials can be done in many ways e.g. plain file, in code tls.Config -// or by certificate rotation, so it is up to the caller to decide what to use. -func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption { - return &clientCredentials{TransportCredentials: creds} -} - -func (cc *clientCredentials) withExporter(e *Exporter) { - e.clientTransportCredentials = cc.TransportCredentials -} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go index 983ebe7b70..ba7b51d206 100644 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go @@ -15,7 +15,6 @@ package ocagent import ( - "math" "time" "go.opencensus.io/trace" @@ -25,11 +24,6 @@ import ( "github.com/golang/protobuf/ptypes/timestamp" ) -const ( - maxAnnotationEventsPerSpan = 32 - maxMessageEventsPerSpan = 128 -) - func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span { if sd == nil { return nil @@ -49,7 +43,6 @@ func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span { Kind: ocSpanKindToProtoSpanKind(sd.SpanKind), Name: namePtr, Attributes: ocAttributesToProtoAttributes(sd.Attributes), - TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents), Tracestate: ocTracestateToProtoTracestate(sd.Tracestate), } } @@ -129,85 +122,6 @@ func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_A } } -// This code is mostly copied from -// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46 -func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents { - if len(as) == 0 && len(es) == 0 { - return nil - } - - timeEvents := &tracepb.Span_TimeEvents{} - var annotations, droppedAnnotationsCount int - var messageEvents, droppedMessageEventsCount int - - // Transform annotations - for i, a := range as { - if annotations >= maxAnnotationEventsPerSpan { - droppedAnnotationsCount = len(as) - i - break - } - annotations++ - timeEvents.TimeEvent = append(timeEvents.TimeEvent, - &tracepb.Span_TimeEvent{ - Time: timeToTimestamp(a.Time), - Value: transformAnnotationToTimeEvent(&a), - }, - ) - } - - // Transform message events - for i, e := range es { - if messageEvents >= maxMessageEventsPerSpan { - droppedMessageEventsCount = len(es) - i - break - } - messageEvents++ - timeEvents.TimeEvent = append(timeEvents.TimeEvent, - &tracepb.Span_TimeEvent{ - Time: timeToTimestamp(e.Time), - Value: transformMessageEventToTimeEvent(&e), - }, - ) - } - - // Process dropped counter - timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) - timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) - - return timeEvents -} - -func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ { - return &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: &tracepb.TruncatableString{Value: a.Message}, - Attributes: ocAttributesToProtoAttributes(a.Attributes), - }, - } -} - -func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ { - return &tracepb.Span_TimeEvent_MessageEvent_{ - MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ - Type: 
tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
- Id: uint64(e.MessageID),
- UncompressedSize: uint64(e.UncompressedByteSize),
- CompressedSize: uint64(e.CompressedByteSize),
- },
- }
-}
-
-// clip32 clips an int to the range of an int32.
-func clip32(x int) int32 {
- if x < math.MinInt32 {
- return math.MinInt32
- }
- if x > math.MaxInt32 {
- return math.MaxInt32
- }
- return int32(x)
-}
-
 func timeToTimestamp(t time.Time) *timestamp.Timestamp {
 nanoTime := t.UnixNano()
 return &timestamp.Timestamp{
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
deleted file mode 100644
index 43f18dec19..0000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
- "errors"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-
- "github.com/golang/protobuf/ptypes/timestamp"
-
- metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
-)
-
-var (
- errNilMeasure = errors.New("expecting a non-nil stats.Measure")
- errNilView = errors.New("expecting a non-nil view.View")
- errNilViewData = errors.New("expecting a non-nil view.Data")
-)
-
-func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) {
- if vd == nil {
- return nil, errNilViewData
- }
-
- descriptor, err := viewToMetricDescriptor(vd.View)
- if err != nil {
- return nil, err
- }
-
- timeseries, err := viewDataToTimeseries(vd)
- if err != nil {
- return nil, err
- }
-
- metric := &metricspb.Metric{
- MetricDescriptor: descriptor,
- Timeseries: timeseries,
- }
- return metric, nil
-}
-
-func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) {
- if v == nil {
- return nil, errNilView
- }
- if v.Measure == nil {
- return nil, errNilMeasure
- }
-
- desc := &metricspb.MetricDescriptor{
- Name: stringOrCall(v.Name, v.Measure.Name),
- Description: stringOrCall(v.Description, v.Measure.Description),
- Unit: v.Measure.Unit(),
- Type: aggregationToMetricDescriptorType(v),
- LabelKeys: tagKeysToLabelKeys(v.TagKeys),
- }
- return desc, nil
-}
-
-func stringOrCall(first string, call func() string) string {
- if first != "" {
- return first
- }
- return call()
-}
-
-type measureType uint
-
-const (
- measureUnknown measureType = iota
- measureInt64
- measureFloat64
-)
-
-func measureTypeFromMeasure(m stats.Measure) measureType {
- switch m.(type) {
- default:
- return measureUnknown
- case *stats.Float64Measure:
- return measureFloat64
- case *stats.Int64Measure:
- return measureInt64
- }
-}
-
-func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type {
- if v == nil || v.Aggregation == nil {
- return metricspb.MetricDescriptor_UNSPECIFIED
- }
- if v.Measure == nil {
- return metricspb.MetricDescriptor_UNSPECIFIED
- }
-
- switch v.Aggregation.Type {
- case view.AggTypeCount:
- // Cumulative on int64
- return metricspb.MetricDescriptor_CUMULATIVE_INT64
-
- case view.AggTypeDistribution:
- // Cumulative types
- return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
-
- case view.AggTypeLastValue:
- // Gauge types
- switch measureTypeFromMeasure(v.Measure) {
- case measureFloat64:
- return metricspb.MetricDescriptor_GAUGE_DOUBLE
- case measureInt64:
- return metricspb.MetricDescriptor_GAUGE_INT64
- }
-
- case view.AggTypeSum:
- // Cumulative types
- switch measureTypeFromMeasure(v.Measure) {
- case measureFloat64:
- return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE
- case measureInt64:
- return metricspb.MetricDescriptor_CUMULATIVE_INT64
- }
- }
-
- // For all other cases, return unspecified.
- return metricspb.MetricDescriptor_UNSPECIFIED
-}
-
-func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey {
- labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys))
- for _, tagKey := range tagKeys {
- labelKeys = append(labelKeys, &metricspb.LabelKey{
- Key: tagKey.Name(),
- })
- }
- return labelKeys
-}
-
-func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) {
- if vd == nil || len(vd.Rows) == 0 {
- return nil, nil
- }
-
- // Given that view.Data only contains Start, End
- // the timestamps for all the row data will be the exact same
- // per aggregation. However, the values will differ.
- // Each row has its own tags.
- startTimestamp := timeToProtoTimestamp(vd.Start)
- endTimestamp := timeToProtoTimestamp(vd.End)
-
- mType := measureTypeFromMeasure(vd.View.Measure)
- timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows))
- // It is imperative that the ordering of "LabelValues" matches those
- // of the Label keys in the metric descriptor.
- for _, row := range vd.Rows {
- labelValues := labelValuesFromTags(row.Tags)
- point := rowToPoint(vd.View, row, endTimestamp, mType)
- timeseries = append(timeseries, &metricspb.TimeSeries{
- StartTimestamp: startTimestamp,
- LabelValues: labelValues,
- Points: []*metricspb.Point{point},
- })
- }
-
- if len(timeseries) == 0 {
- return nil, nil
- }
-
- return timeseries, nil
-}
-
-func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp {
- unixNano := t.UnixNano()
- return &timestamp.Timestamp{
- Seconds: int64(unixNano / 1e9),
- Nanos: int32(unixNano % 1e9),
- }
-}
-
-func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point {
- pt := &metricspb.Point{
- Timestamp: endTimestamp,
- }
-
- switch data := row.Data.(type) {
- case *view.CountData:
- pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value}
-
- case *view.DistributionData:
- pt.Value = &metricspb.Point_DistributionValue{
- DistributionValue: &metricspb.DistributionValue{
- Count: data.Count,
- Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count
- // TODO: Add Exemplar
- Buckets: bucketsToProtoBuckets(data.CountPerBucket),
- BucketOptions: &metricspb.DistributionValue_BucketOptions{
- Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
- Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
- Bounds: v.Aggregation.Buckets,
- },
- },
- },
- SumOfSquaredDeviation: data.SumOfSquaredDev,
- }}
-
- case *view.LastValueData:
- setPointValue(pt, data.Value, mType)
-
- case *view.SumData:
- setPointValue(pt, data.Value, mType)
- }
-
- return pt
-}
-
-// Not returning anything from this function because metricspb.Point.is_Value is an unexported
-// interface hence we just have to set its value by pointer.
-func setPointValue(pt *metricspb.Point, value float64, mType measureType) {
- if mType == measureInt64 {
- pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)}
- } else {
- pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value}
- }
-}
-
-func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket {
- distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket))
- for i := 0; i < len(countPerBucket); i++ {
- count := countPerBucket[i]
-
- distBuckets[i] = &metricspb.DistributionValue_Bucket{
- Count: count,
- }
- }
-
- return distBuckets
-}
-
-func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue {
- if len(tags) == 0 {
- return nil
- }
-
- labelValues := make([]*metricspb.LabelValue, 0, len(tags))
- for _, tag_ := range tags {
- labelValues = append(labelValues, &metricspb.LabelValue{
- Value: tag_.Value,
-
- // It is imperative that we set the "HasValue" attribute,
- // in order to distinguish missing a label from the empty string.
- // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue
- //
- // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments,
- // so the best case that we can use to distinguish missing labels/tags from the
- // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have
- // a value.
- HasValue: tag_.Key.Name() != "", - }) - } - return labelValues -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE similarity index 100% rename from vendor/github.com/Azure/go-autorest/autorest/LICENSE rename to vendor/github.com/Azure/go-autorest/LICENSE diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go index fa5964742f..8c83a917ff 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -15,15 +15,10 @@ package adal // limitations under the License. import ( - "errors" "fmt" "net/url" ) -const ( - activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" -) - // OAuthConfig represents the endpoints needed // in OAuth operations type OAuthConfig struct { @@ -65,6 +60,7 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV } api = fmt.Sprintf("?api-version=%s", *apiVersion) } + const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" u, err := url.Parse(activeDirectoryEndpoint) if err != nil { return nil, err @@ -93,59 +89,3 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV DeviceCodeEndpoint: *deviceCodeURL, }, nil } - -// MultiTenantOAuthConfig provides endpoints for primary and aulixiary tenant IDs. -type MultiTenantOAuthConfig interface { - PrimaryTenant() *OAuthConfig - AuxiliaryTenants() []*OAuthConfig -} - -// OAuthOptions contains optional OAuthConfig creation arguments. -type OAuthOptions struct { - APIVersion string -} - -func (c OAuthOptions) apiVersion() string { - if c.APIVersion != "" { - return fmt.Sprintf("?api-version=%s", c.APIVersion) - } - return "1.0" -} - -// NewMultiTenantOAuthConfig creates an object that support multitenant OAuth configuration. -// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information. 
-func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { - if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { - return nil, errors.New("must specify one to three auxiliary tenants") - } - mtCfg := multiTenantOAuthConfig{ - cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), - } - apiVer := options.apiVersion() - pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) - if err != nil { - return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) - } - mtCfg.cfgs[0] = pri - for i := range auxiliaryTenantIDs { - aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) - if err != nil { - return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) - } - mtCfg.cfgs[i+1] = aux - } - return mtCfg, nil -} - -type multiTenantOAuthConfig struct { - // first config in the slice is the primary tenant - cfgs []*OAuthConfig -} - -func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { - return m.cfgs[0] -} - -func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { - return m.cfgs[1:] -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod deleted file mode 100644 index 5c95dbfea4..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/Azure/go-autorest/autorest/adal - -go 1.12 - -require ( - github.com/Azure/go-autorest/autorest/date v0.1.0 - github.com/Azure/go-autorest/autorest/mocks v0.1.0 - github.com/Azure/go-autorest/tracing v0.1.0 - github.com/dgrijalva/jwt-go v3.2.0+incompatible - golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 -) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum deleted file mode 100644 index ef9d101618..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum +++ /dev/null @@ -1,139 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= -contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= -github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/alecthomas/template 
v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= -golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 4083e76e69..effa87ab2f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -71,12 +71,6 @@ type OAuthTokenProvider interface { OAuthToken() string } -// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization. -type MultitenantOAuthTokenProvider interface { - PrimaryOAuthToken() string - AuxiliaryOAuthTokens() []string -} - // TokenRefreshError is an interface used by errors returned during token refresh. type TokenRefreshError interface { error @@ -989,67 +983,3 @@ func (spt *ServicePrincipalToken) Token() Token { defer spt.refreshLock.RUnlock() return spt.inner.Token } - -// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. -type MultiTenantServicePrincipalToken struct { - PrimaryToken *ServicePrincipalToken - AuxiliaryTokens []*ServicePrincipalToken -} - -// PrimaryOAuthToken returns the primary authorization token. 
-func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { - return mt.PrimaryToken.OAuthToken() -} - -// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. -func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { - tokens := make([]string, len(mt.AuxiliaryTokens)) - for i := range mt.AuxiliaryTokens { - tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() - } - return tokens -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh primary token: %v", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.EnsureFreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %v", err) - } - } - return nil -} - -// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. -func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - auxTenants := multiTenantCfg.AuxiliaryTenants() - m := MultiTenantServicePrincipalToken{ - AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), - } - primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) - } - m.PrimaryToken = primary - for i := range auxTenants { - aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) - } - m.AuxiliaryTokens[i] = aux - } - return &m, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go index 380865cd6a..bc474b406a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -15,7 +15,6 @@ package autorest // limitations under the License. import ( - "encoding/base64" "fmt" "net/http" "net/url" @@ -32,8 +31,6 @@ const ( apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" bingAPISdkHeader = "X-BingApis-SDK-Client" golangBingAPISdkHeaderValue = "Go-SDK" - authorization = "Authorization" - basic = "Basic" ) // Authorizer is the interface that provides a PrepareDecorator used to supply request @@ -261,76 +258,3 @@ func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { } return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() } - -// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header -// with the value "Basic " where is a base64-encoded username:password tuple. 
-type BasicAuthorizer struct { - userName string - password string -} - -// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password. -func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { - return &BasicAuthorizer{ - userName: userName, - password: password, - } -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Basic " followed by the base64-encoded username:password tuple. -func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { - headers := make(map[string]interface{}) - headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) - - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants. -type MultiTenantServicePrincipalTokenAuthorizer interface { - WithAuthorization() PrepareDecorator -} - -// NewMultiTenantServicePrincipalTokenAuthorizer crates a BearerAuthorizer using the given token provider -func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer { - return &multiTenantSPTAuthorizer{tp: tp} -} - -type multiTenantSPTAuthorizer struct { - tp adal.MultitenantOAuthTokenProvider -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the -// primary token along with the auxiliary authorization header using the auxiliary tokens. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - if refresher, ok := mt.tp.(adal.RefresherWithContext); ok { - err = refresher.EnsureFreshWithContext(r.Context()) - if err != nil { - var resp *http.Response - if tokError, ok := err.(adal.TokenRefreshError); ok { - resp = tokError.Response() - } - return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp, - "Failed to refresh one or more Tokens for request to %s", r.URL) - } - } - r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken()))) - if err != nil { - return r, err - } - auxTokens := mt.tp.AuxiliaryOAuthTokens() - for i := range auxTokens { - auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) - } - return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; "))) - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index 02d0119614..0041eacf75 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -45,7 +45,14 @@ var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.Stat // Future provides a mechanism to access the status and results of an asynchronous request. // Since futures are stateful they should be passed by value to avoid race conditions. type Future struct { - pt pollingTracker + req *http.Request // legacy + pt pollingTracker +} + +// NewFuture returns a new Future object initialized with the specified request. +// Deprecated: Please use NewFutureFromResponse instead. 
+func NewFuture(req *http.Request) Future { + return Future{req: req} } // NewFutureFromResponse returns a new Future object initialized @@ -79,6 +86,12 @@ func (f Future) PollingMethod() PollingMethodType { return f.pt.pollingMethod() } +// Done queries the service to see if the operation has completed. +// Deprecated: Use DoneWithContext() +func (f *Future) Done(sender autorest.Sender) (bool, error) { + return f.DoneWithContext(context.Background(), sender) +} + // DoneWithContext queries the service to see if the operation has completed. func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") @@ -91,6 +104,20 @@ func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (d tracing.EndSpan(ctx, sc, err) }() + // support for legacy Future implementation + if f.req != nil { + resp, err := sender.Do(f.req) + if err != nil { + return false, err + } + pt, err := createPollingTracker(resp) + if err != nil { + return false, err + } + f.pt = pt + f.req = nil + } + // end legacy if f.pt == nil { return false, autorest.NewError("Future", "Done", "future is not initialized") } @@ -141,6 +168,15 @@ func (f Future) GetPollingDelay() (time.Duration, bool) { return d, true } +// WaitForCompletion will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// Deprecated: Please use WaitForCompletionRef() instead. +func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error { + return f.WaitForCompletionRef(ctx, client) +} + // WaitForCompletionRef will return when one of the following conditions is met: the long // running operation has completed, the provided context is cancelled, or the client's // polling duration has been exceeded. It will retry failed polling attempts based on @@ -883,6 +919,43 @@ func isValidURL(s string) bool { return err == nil && u.IsAbs() } +// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure +// long-running operation. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled via +// the context associated with the http.Request. +// Deprecated: Prefer using Futures to allow for non-blocking async operations. +func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + return resp, err + } + if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) 
{ + return resp, nil + } + future, err := NewFutureFromResponse(resp) + if err != nil { + return resp, err + } + // retry until either the LRO completes or we receive an error + var done bool + for done, err = future.Done(s); !done && err == nil; done, err = future.Done(s) { + // check for Retry-After delay, if not present use the specified polling delay + if pd, ok := future.GetPollingDelay(); ok { + delay = pd + } + // wait until the delay elapses or the context is cancelled + if delayElapsed := autorest.DelayForBackoff(delay, 0, r.Context().Done()); !delayElapsed { + return future.Response(), + autorest.NewErrorWithError(r.Context().Err(), "azure", "DoPollForAsynchronous", future.Response(), "context has been cancelled") + } + } + return future.Response(), err + }) + } +} + // PollingMethodType defines a type used for enumerating polling mechanisms. type PollingMethodType string diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 6c20b8179a..7e41f7fd99 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -22,14 +22,9 @@ import ( "strings" ) -const ( - // EnvironmentFilepathName captures the name of the environment variable containing the path to the file - // to be used while populating the Azure Environment. - EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" - - // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. - NotAvailable = "N/A" -) +// EnvironmentFilepathName captures the name of the environment variable containing the path to the file +// to be used while populating the Azure Environment. +const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" var environments = map[string]Environment{ "AZURECHINACLOUD": ChinaCloud, @@ -38,40 +33,28 @@ var environments = map[string]Environment{ "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, } -// ResourceIdentifier contains a set of Azure resource IDs. -type ResourceIdentifier struct { - Graph string `json:"graph"` - KeyVault string `json:"keyVault"` - Datalake string `json:"datalake"` - Batch string `json:"batch"` - OperationalInsights string `json:"operationalInsights"` - Storage string `json:"storage"` -} - // Environment represents a set of endpoints for each of Azure's Clouds. 
type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - ServiceBusEndpoint string `json:"serviceBusEndpoint"` - BatchManagementEndpoint string `json:"batchManagementEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` - CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` - TokenAudience string `json:"tokenAudience"` - ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + TokenAudience string `json:"tokenAudience"` } var ( @@ -96,16 +79,7 @@ var ( ServiceManagementVMDNSSuffix: "cloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.azure.com", ContainerRegistryDNSSuffix: "azurecr.io", - CosmosDBDNSSuffix: "documents.azure.com", TokenAudience: "https://management.azure.com/", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.windows.net/", - KeyVault: "https://vault.azure.net", - Datalake: "https://datalake.azure.net/", - Batch: "https://batch.core.windows.net/", - OperationalInsights: "https://api.loganalytics.io", - Storage: "https://storage.azure.com/", - }, } // USGovernmentCloud is the cloud environment for the US Government @@ -128,17 +102,8 @@ var ( ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", ServiceManagementVMDNSSuffix: "usgovcloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", - ContainerRegistryDNSSuffix: "azurecr.us", - CosmosDBDNSSuffix: "documents.azure.us", + ContainerRegistryDNSSuffix: "azurecr.io", TokenAudience: "https://management.usgovcloudapi.net/", - 
ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.windows.net/", - KeyVault: "https://vault.usgovcloudapi.net", - Datalake: NotAvailable, - Batch: "https://batch.core.usgovcloudapi.net/", - OperationalInsights: "https://api.loganalytics.us", - Storage: "https://storage.azure.com/", - }, } // ChinaCloud is the cloud environment operated in China @@ -161,17 +126,8 @@ var ( ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", ServiceManagementVMDNSSuffix: "chinacloudapp.cn", ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", - ContainerRegistryDNSSuffix: "azurecr.cn", - CosmosDBDNSSuffix: "documents.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.io", TokenAudience: "https://management.chinacloudapi.cn/", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.chinacloudapi.cn/", - KeyVault: "https://vault.azure.cn", - Datalake: NotAvailable, - Batch: "https://batch.chinacloudapi.cn/", - OperationalInsights: NotAvailable, - Storage: "https://storage.azure.com/", - }, } // GermanCloud is the cloud environment operated in Germany @@ -194,17 +150,8 @@ var ( ServiceBusEndpointSuffix: "servicebus.cloudapi.de", ServiceManagementVMDNSSuffix: "azurecloudapp.de", ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - ContainerRegistryDNSSuffix: NotAvailable, - CosmosDBDNSSuffix: "documents.microsoftazure.de", + ContainerRegistryDNSSuffix: "azurecr.io", TokenAudience: "https://management.microsoftazure.de/", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.cloudapi.de/", - KeyVault: "https://vault.microsoftazure.de", - Datalake: NotAvailable, - Batch: "https://batch.cloudapi.de/", - OperationalInsights: NotAvailable, - Storage: "https://storage.azure.com/", - }, } ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index 92da6adb2c..4874e6e82d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -16,7 +16,6 @@ package autorest import ( "bytes" - "crypto/tls" "fmt" "io" "io/ioutil" @@ -73,22 +72,6 @@ type Response struct { *http.Response `json:"-"` } -// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code. -// If there was no response (i.e. the underlying http.Response is nil) the return value is false. -func (r Response) IsHTTPStatus(statusCode int) bool { - if r.Response == nil { - return false - } - return r.Response.StatusCode == statusCode -} - -// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes. -// If there was no response (i.e. the underlying http.Response is nil) or not status codes are provided -// the return value is false. -func (r Response) HasHTTPStatus(statusCodes ...int) bool { - return ResponseHasStatusCode(r.Response, statusCodes...) -} - // LoggingInspector implements request and response inspectors that log the full request and // response to a supplied log. type LoggingInspector struct { @@ -186,24 +169,6 @@ type Client struct { // NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed // string. func NewClientWithUserAgent(ua string) Client { - return newClient(ua, tls.RenegotiateNever) -} - -// ClientOptions contains various Client configuration options. -type ClientOptions struct { - // UserAgent is an optional user-agent string to append to the default user agent. 
- UserAgent string - - // Renegotiation is an optional setting to control client-side TLS renegotiation. - Renegotiation tls.RenegotiationSupport -} - -// NewClientWithOptions returns an instance of a Client with the specified values. -func NewClientWithOptions(options ClientOptions) Client { - return newClient(options.UserAgent, options.Renegotiation) -} - -func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { c := Client{ PollingDelay: DefaultPollingDelay, PollingDuration: DefaultPollingDuration, @@ -211,7 +176,7 @@ func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { RetryDuration: DefaultRetryDuration, UserAgent: UserAgent(), } - c.Sender = c.sender(renegotiation) + c.Sender = c.sender() c.AddToUserAgent(ua) return c } @@ -255,37 +220,18 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { return true, v }, }) - resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r) + resp, err := SendWithSender(c.sender(), r) logger.Instance.WriteResponse(resp, logger.Filter{}) Respond(resp, c.ByInspecting()) return resp, err } // sender returns the Sender to which to send requests. -func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender { +func (c Client) sender() Sender { if c.Sender == nil { - // Use behaviour compatible with DefaultTransport, but require TLS minimum version. - var defaultTransport = http.DefaultTransport.(*http.Transport) - transport := tracing.Transport - // for non-default values of TLS renegotiation create a new tracing transport. - // updating tracing.Transport affects all clients which is not what we want. - if renengotiation != tls.RenegotiateNever { - transport = tracing.NewTransport() - } - transport.Base = &http.Transport{ - Proxy: defaultTransport.Proxy, - DialContext: defaultTransport.DialContext, - MaxIdleConns: defaultTransport.MaxIdleConns, - IdleConnTimeout: defaultTransport.IdleConnTimeout, - TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, - ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - Renegotiation: renengotiation, - }, - } j, _ := cookiejar.New(nil) - return &http.Client{Jar: j, Transport: transport} + client := &http.Client{Jar: j, Transport: tracing.Transport} + return client } return c.Sender diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod deleted file mode 100644 index 13a1e98033..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/Azure/go-autorest/autorest/date - -go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod deleted file mode 100644 index 6b7c7f2762..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/Azure/go-autorest/autorest - -go 1.12 - -require ( - github.com/Azure/go-autorest/autorest/adal v0.2.0 - github.com/Azure/go-autorest/autorest/mocks v0.1.0 - github.com/Azure/go-autorest/logger v0.1.0 - github.com/Azure/go-autorest/tracing v0.1.0 - go.opencensus.io v0.20.2 - golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 -) diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum deleted file mode 100644 index a6848303f4..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/go.sum +++ /dev/null @@ -1,143 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= -contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= -github.com/Azure/go-autorest/autorest/adal v0.2.0 h1:7IBDu1jgh+ADHXnEYExkV9RE/ztOOlxdACkkPRthGKw= -github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= -github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/census-instrumentation/opencensus-proto v0.2.0 
h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod 
h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go index 4891402249..6d67bd7337 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -17,7 +17,6 @@ package autorest import ( "bytes" "encoding/json" - "encoding/xml" "fmt" "io" "io/ioutil" @@ -32,10 +31,9 @@ const ( mimeTypeOctetStream = "application/octet-stream" mimeTypeFormPost = "application/x-www-form-urlencoded" - headerAuthorization = "Authorization" - headerAuxAuthorization = "x-ms-authorization-auxiliary" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" + headerAuthorization = "Authorization" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" ) // Preparer is the interface that wraps the Prepare method. @@ -192,9 +190,6 @@ func AsGet() PrepareDecorator { return WithMethod("GET") } // AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. func AsHead() PrepareDecorator { return WithMethod("HEAD") } -// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. -func AsMerge() PrepareDecorator { return WithMethod("MERGE") } - // AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. 
func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } @@ -230,25 +225,6 @@ func WithBaseURL(baseURL string) PrepareDecorator { } } -// WithBytes returns a PrepareDecorator that takes a list of bytes -// which passes the bytes directly to the body -func WithBytes(input *[]byte) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if input == nil { - return r, fmt.Errorf("Input Bytes was nil") - } - - r.ContentLength = int64(len(*input)) - r.Body = ioutil.NopCloser(bytes.NewReader(*input)) - } - return r, err - }) - } -} - // WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the // request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { @@ -401,28 +377,6 @@ func WithJSON(v interface{}) PrepareDecorator { } } -// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the -// request and sets the Content-Length header. -func WithXML(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := xml.Marshal(v) - if err == nil { - // we have to tack on an XML header - withHeader := xml.Header + string(b) - bytesWithHeader := []byte(withHeader) - - r.ContentLength = int64(len(bytesWithHeader)) - r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) - } - } - return r, err - }) - } -} - // WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path // is absolute (that is, it begins with a "/"), it replaces the existing path. func WithPath(path string) PrepareDecorator { diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go index 349e1963a2..a908a0adb7 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -153,25 +153,6 @@ func ByClosingIfError() RespondDecorator { } } -// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingBytes(v *[]byte) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - bytes, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - *v = bytes - } - } - return err - }) - } -} - // ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the // response Body into the value pointed to by v. func ByUnmarshallingJSON(v interface{}) RespondDecorator { diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go index b918833fab..6665d7c006 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -15,7 +15,6 @@ package autorest // limitations under the License. 
import ( - "context" "fmt" "log" "math" @@ -26,23 +25,6 @@ import ( "github.com/Azure/go-autorest/tracing" ) -// used as a key type in context.WithValue() -type ctxSendDecorators struct{} - -// WithSendDecorators adds the specified SendDecorators to the provided context. -func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context { - return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator) -} - -// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators. -func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator { - inCtx := ctx.Value(ctxSendDecorators{}) - if sd, ok := inCtx.([]SendDecorator); ok { - return sd - } - return defaultSendDecorators -} - // Sender is the interface that wraps the Do method to send HTTP requests. // // The standard http.Client conforms to this interface. @@ -229,77 +211,53 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { // DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified // number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. -// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...) - }) - } -} - -// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the -// specified number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater -// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. -func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...) - }) - } -} - -func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - // Increment to add the first call (attempts denotes number of retries) - for attempt := 0; attempt < attempts+1; { - err = rr.Prepare() - if err != nil { - return - } - resp, err = s.Do(rr.Request()) - // if the error isn't temporary don't bother retrying - if err != nil && !IsTemporaryNetworkError(err) { - return - } - // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication - // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. - if err == nil && !ResponseHasStatusCode(resp, codes...) 
|| IsTokenRefreshError(err) { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + for attempt := 0; attempt < attempts+1; { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + // if the error isn't temporary don't bother retrying + if err != nil && !IsTemporaryNetworkError(err) { + return nil, err + } + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return resp, r.Context().Err() + } + // don't count a 429 against the number of attempts + // so that we continue to retry until it succeeds + if resp == nil || resp.StatusCode != http.StatusTooManyRequests { + attempt++ + } + } return resp, err - } - delayed := DelayWithRetryAfter(resp, r.Context().Done()) - if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) { - return resp, r.Context().Err() - } - // when count429 == false don't count a 429 against the number - // of attempts so that we continue to retry until it succeeds - if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { - attempt++ - } + }) } - return resp, err } -// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. -// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. -// The function returns true after successfully waiting for the specified duration. If there is -// no Retry-After header or the wait is cancelled the return value is false. +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in +// responses with status code 429 func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { if resp == nil { return false } - var dur time.Duration - ra := resp.Header.Get("Retry-After") - if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { - dur = time.Duration(retryAfter) * time.Second - } else if t, err := time.Parse(time.RFC1123, ra); err == nil { - dur = t.Sub(time.Now()) - } - if dur > 0 { + retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) + if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { select { - case <-time.After(dur): + case <-time.After(time.Duration(retryAfter) * time.Second): return true case <-cancel: return false @@ -359,22 +317,8 @@ func WithLogging(logger *log.Logger) SendDecorator { // Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt // count. func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { - return DelayForBackoffWithCap(backoff, 0, attempt, cancel) -} - -// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set -// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap. -// The delay may be canceled by closing the passed channel. If terminated early, returns false. 
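
The DoRetryForStatusCodes hunk above swaps the v12 implementation (shared doRetryForStatusCodesImpl, optional backoff cap via DoRetryForStatusCodesWithCap) for the older inline loop: exponential backoff of backoff*2^attempt, Retry-After honored only on 429 responses, and 429s not counted against the attempt budget. A minimal sketch of wiring this decorator from calling code follows, assuming the canonical upstream import path; the URL, status codes, and backoff values are illustrative only.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.invalid/health", nil)
	// Retry up to 3 times on 429/5xx with a 2s exponential backoff (2s, 4s, 8s).
	// Per the hunk above, 429 responses do not consume attempts, and the delay
	// can be cut short by cancelling the request's context.
	resp, err := autorest.SendWithSender(
		http.DefaultClient, // *http.Client satisfies the Sender interface
		req,
		autorest.DoRetryForStatusCodes(3, 2*time.Second,
			http.StatusTooManyRequests,
			http.StatusInternalServerError,
			http.StatusBadGateway,
			http.StatusServiceUnavailable),
	)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}
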
-// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { - d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second - if cap > 0 && d > cap { - d = cap - } select { - case <-time.After(d): + case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): return true case <-cancel: return false diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index ec7e848172..6230b73961 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -19,7 +19,7 @@ import ( "runtime" ) -const number = "v12.3.0" +const number = "v11.5.0" var ( userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/logger/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod deleted file mode 100644 index f22ed56bcd..0000000000 --- a/vendor/github.com/Azure/go-autorest/logger/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/Azure/go-autorest/logger - -go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod deleted file mode 100644 index b066b3e6e6..0000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/Azure/go-autorest/tracing - -go 1.12 - -require ( - contrib.go.opencensus.io/exporter/ocagent v0.4.12 - go.opencensus.io v0.20.2 -) diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.sum b/vendor/github.com/Azure/go-autorest/tracing/go.sum deleted file mode 100644 index 4e5548c916..0000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/go.sum +++ /dev/null @@ -1,130 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= -contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go 
index 28951c2848..cd61cb18b6 100644 --- a/vendor/github.com/Azure/go-autorest/tracing/tracing.go +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -30,7 +30,10 @@ import ( var ( // Transport is the default tracing RoundTripper. The custom options setter will control // if traces are being emitted or not. - Transport = NewTransport() + Transport = &ochttp.Transport{ + Propagation: &tracecontext.HTTPFormat{}, + GetStartOptions: getStartOptions, + } // enabled is the flag for marking if tracing is enabled. enabled = false @@ -64,14 +67,6 @@ func enableFromEnv() { } } -// NewTransport returns a new instance of a tracing-aware RoundTripper. -func NewTransport() *ochttp.Transport { - return &ochttp.Transport{ - Propagation: &tracecontext.HTTPFormat{}, - GetStartOptions: getStartOptions, - } -} - // IsEnabled returns true if monitoring is enabled for the sdk. func IsEnabled() bool { return enabled diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go index 12b578d068..2f12e428ed 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go @@ -19,7 +19,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type LibraryInfo_Language int32 @@ -70,8 +70,8 @@ func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) { return fileDescriptor_126c72ed8a252c84, []int{2, 0} } -// Identifier metadata of the Node that produces the span or tracing data. -// Note, this is not the metadata about the Node or service that is described by associated spans. +// Identifier metadata of the Node (Application instrumented with OpenCensus) +// that connects to OpenCensus Agent. // In the future we plan to extend the identifier proto definition to support // additional information (e.g cloud id, etc.) type Node struct { diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go deleted file mode 100644 index 801212d925..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go +++ /dev/null @@ -1,264 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/agent/metrics/v1/metrics_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" - v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type ExportMetricsServiceRequest struct { - // This is required only in the first message on the stream or if the - // previous sent ExportMetricsServiceRequest message has a different Node (e.g. - // when the same RPC is used to send Metrics from multiple Applications). - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // A list of metrics that belong to the last received Node. - Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` - // The resource for the metrics in this message that do not have an explicit - // resource set. - // If unset, the most recently set resource in the RPC stream applies. It is - // valid to never be set within a stream, e.g. when no resource info is known - // at all or when all sent metrics have an explicit resource set. - Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } -func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceRequest) ProtoMessage() {} -func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_47e253a956287d04, []int{0} -} - -func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b) -} -func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) -} -func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) -} -func (m *ExportMetricsServiceRequest) XXX_Size() int { - return xxx_messageInfo_ExportMetricsServiceRequest.Size(m) -} -func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo - -func (m *ExportMetricsServiceRequest) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric { - if m != nil { - return m.Metrics - } - return nil -} - -func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource { - if m != nil { - return m.Resource - } - return nil -} - -type ExportMetricsServiceResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } -func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceResponse) ProtoMessage() {} -func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_47e253a956287d04, []int{1} -} - -func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { - 
return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b) -} -func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) -} -func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) -} -func (m *ExportMetricsServiceResponse) XXX_Size() int { - return xxx_messageInfo_ExportMetricsServiceResponse.Size(m) -} -func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest") - proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse") -} - -func init() { - proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04) -} - -var fileDescriptor_47e253a956287d04 = []byte{ - // 340 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xc1, 0x4a, 0xf3, 0x40, - 0x14, 0x85, 0xff, 0xf9, 0x2b, 0x55, 0xa6, 0xe0, 0x62, 0xdc, 0x94, 0x2a, 0x52, 0xab, 0x48, 0x45, - 0x32, 0x63, 0xea, 0x42, 0x10, 0x54, 0x28, 0xb8, 0x11, 0x94, 0x12, 0x77, 0x6e, 0xa4, 0x4d, 0x2f, - 0x71, 0x16, 0x99, 0x1b, 0x67, 0x26, 0xc1, 0x57, 0x70, 0xe5, 0x3b, 0xf8, 0x5c, 0x3e, 0x8c, 0x24, - 0x93, 0xb4, 0x94, 0x18, 0x0b, 0xee, 0x2e, 0x99, 0xf3, 0x9d, 0x9c, 0x33, 0x73, 0xe9, 0x05, 0x26, - 0xa0, 0x42, 0x50, 0x26, 0x35, 0x22, 0xd1, 0x68, 0x51, 0x4c, 0x23, 0x50, 0x56, 0xc4, 0x60, 0xb5, - 0x0c, 0x8d, 0xc8, 0xfc, 0x6a, 0x7c, 0x36, 0xa0, 0x33, 0x19, 0x02, 0x2f, 0x64, 0xec, 0x60, 0x09, - 0xba, 0x2f, 0xbc, 0x00, 0x79, 0xa9, 0xe6, 0x99, 0xdf, 0xf3, 0x1a, 0xbc, 0x43, 0x8c, 0x63, 0x54, - 0xb9, 0xb5, 0x9b, 0x1c, 0xdf, 0x3b, 0xa9, 0xc9, 0xeb, 0x21, 0x4a, 0xe9, 0x69, 0x4d, 0xaa, 0xc1, - 0x60, 0xaa, 0x43, 0xc8, 0xb5, 0xd5, 0xec, 0xc4, 0x83, 0x2f, 0x42, 0x77, 0x6f, 0xdf, 0x12, 0xd4, - 0xf6, 0xde, 0x99, 0x3c, 0xba, 0x22, 0x01, 0xbc, 0xa6, 0x60, 0x2c, 0xbb, 0xa4, 0x1b, 0x0a, 0xe7, - 0xd0, 0x25, 0x7d, 0x32, 0xec, 0x8c, 0x8e, 0x79, 0x43, 0xb1, 0x32, 0x6b, 0xe6, 0xf3, 0x07, 0x9c, - 0x43, 0x50, 0x30, 0xec, 0x8a, 0x6e, 0x96, 0xc9, 0xba, 0xff, 0xfb, 0xad, 0x61, 0x67, 0x74, 0x58, - 0xc7, 0x97, 0x37, 0xc2, 0x5d, 0x80, 0xa0, 0x62, 0xd8, 0x98, 0x6e, 0x55, 0x61, 0xbb, 0xad, 0xa6, - 0xdf, 0x2f, 0xea, 0x64, 0x3e, 0x0f, 0xca, 0x39, 0x58, 0x70, 0x83, 0x7d, 0xba, 0xf7, 0x73, 0x3b, - 0x93, 0xa0, 0x32, 0x30, 0xfa, 0x24, 0x74, 0x7b, 0xf5, 0x88, 0x7d, 0x10, 0xda, 0x76, 0x0c, 0xbb, - 0xe6, 0x6b, 0xdf, 0x91, 0xff, 0x72, 0x79, 0xbd, 0x9b, 0x3f, 0xf3, 0x2e, 0xde, 0xe0, 0xdf, 0x90, - 0x9c, 0x91, 0xf1, 0x3b, 0xa1, 0x47, 0x12, 0xd7, 0x7b, 0x8d, 0x77, 0x56, 0x6d, 0x26, 0xb9, 0x6a, - 0x42, 0x9e, 0xee, 0x22, 0x69, 0x5f, 0xd2, 0x59, 0xfe, 0x48, 0xc2, 0x19, 0x78, 0x52, 0x19, 0xab, - 0xd3, 0x18, 0x94, 0x9d, 0x5a, 0x89, 0x4a, 0x2c, 0xbd, 0x3d, 0xb7, 0x32, 0x11, 0x28, 0x2f, 0xaa, - 0xef, 0xfb, 0xac, 0x5d, 0x1c, 0x9f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x16, 0x61, 0x3b, 0xc3, - 0x1b, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MetricsServiceClient is the client API for MetricsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetricsServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) -} - -type metricsServiceClient struct { - cc *grpc.ClientConn -} - -func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { - return &metricsServiceClient{cc} -} - -func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) { - stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...) - if err != nil { - return nil, err - } - x := &metricsServiceExportClient{stream} - return x, nil -} - -type MetricsService_ExportClient interface { - Send(*ExportMetricsServiceRequest) error - Recv() (*ExportMetricsServiceResponse, error) - grpc.ClientStream -} - -type metricsServiceExportClient struct { - grpc.ClientStream -} - -func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) { - m := new(ExportMetricsServiceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// MetricsServiceServer is the server API for MetricsService service. -type MetricsServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
- Export(MetricsService_ExportServer) error -} - -func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { - s.RegisterService(&_MetricsService_serviceDesc, srv) -} - -func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream}) -} - -type MetricsService_ExportServer interface { - Send(*ExportMetricsServiceResponse) error - Recv() (*ExportMetricsServiceRequest, error) - grpc.ServerStream -} - -type metricsServiceExportServer struct { - grpc.ServerStream -} - -func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) { - m := new(ExportMetricsServiceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _MetricsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService", - HandlerType: (*MetricsServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Export", - Handler: _MetricsService_Export_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto", -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go index e7c49a387a..01eddf4cc1 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go @@ -4,12 +4,12 @@ package v1 import ( - context "context" fmt "fmt" v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" grpc "google.golang.org/grpc" math "math" ) @@ -23,7 +23,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type CurrentLibraryConfig struct { // This is required only in the first message on the stream or if the diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go deleted file mode 100644 index bd4b8a8278..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: opencensus/proto/agent/trace/v1/trace_service.proto - -/* -Package v1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
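
The generated code deleted above defines MetricsService.Export as a bidirectional streaming RPC: the client streams ExportMetricsServiceRequest messages (Node on the first message, then batches of Metrics), and the server may answer with ExportMetricsServiceResponse messages on the same stream. The following is a hedged sketch of a server-side implementation built against the upstream (non-vendored) opencensus-proto module, using only the symbols visible in the deleted file; the listen address is hypothetical.

package main

import (
	"io"
	"log"
	"net"

	agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
	"google.golang.org/grpc"
)

// metricsServer implements the streaming Export RPC from the deleted generated
// code: it drains incoming requests and acknowledges each batch.
type metricsServer struct{}

func (metricsServer) Export(stream agentmetricspb.MetricsService_ExportServer) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return nil // client closed its side of the stream
		}
		if err != nil {
			return err
		}
		log.Printf("received %d metrics", len(req.GetMetrics()))
		if err := stream.Send(&agentmetricspb.ExportMetricsServiceResponse{}); err != nil {
			return err
		}
	}
}

func main() {
	lis, err := net.Listen("tcp", "localhost:55678") // hypothetical agent port
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	agentmetricspb.RegisterMetricsServiceServer(s, metricsServer{})
	log.Fatal(s.Serve(lis))
}
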
-*/ -package v1 - -import ( - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray - -func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) { - var metadata runtime.ServerMetadata - stream, err := client.Export(ctx) - if err != nil { - grpclog.Infof("Failed to start streaming: %v", err) - return nil, metadata, err - } - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, berr - } - dec := marshaler.NewDecoder(newReader()) - handleSend := func() error { - var protoReq ExportTraceServiceRequest - err := dec.Decode(&protoReq) - if err == io.EOF { - return err - } - if err != nil { - grpclog.Infof("Failed to decode request: %v", err) - return err - } - if err := stream.Send(&protoReq); err != nil { - grpclog.Infof("Failed to send request: %v", err) - return err - } - return nil - } - if err := handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Infof("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } - go func() { - for { - if err := handleSend(); err != nil { - break - } - } - if err := stream.CloseSend(); err != nil { - grpclog.Infof("Failed to terminate client stream: %v", err) - } - }() - header, err := stream.Header() - if err != nil { - grpclog.Infof("Failed to get header from client: %v", err) - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil -} - -// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterTraceServiceHandler(ctx, mux, conn) -} - -// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) -} - -// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. 
If the passed in "TraceServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "TraceServiceClient" to call the correct interceptors. -func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { - - mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) -) - -var ( - forward_TraceService_Export_0 = runtime.ForwardResponseStream -) diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go deleted file mode 100644 index 53b8aa99e1..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go +++ /dev/null @@ -1,1126 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/metrics/v1/metrics.proto - -package v1 - -import ( - fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// The kind of metric. It describes how the data is reported. -// -// A gauge is an instantaneous measurement of a value. -// -// A cumulative measurement is a value accumulated over a time interval. In -// a time series, cumulative measurements should have the same start time, -// increasing values and increasing end times, until an event resets the -// cumulative value to zero and sets a new start time for the following -// points. -type MetricDescriptor_Type int32 - -const ( - // Do not use this default value. - MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0 - // Integer gauge. The value can go both up and down. - MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1 - // Floating point gauge. The value can go both up and down. - MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2 - // Distribution gauge measurement. 
The count and sum can go both up and - // down. Recorded values are always >= 0. - // Used in scenarios like a snapshot of time the current items in a queue - // have spent there. - MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3 - // Integer cumulative measurement. The value cannot decrease, if resets - // then the start_time should also be reset. - MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4 - // Floating point cumulative measurement. The value cannot decrease, if - // resets then the start_time should also be reset. Recorded values are - // always >= 0. - MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5 - // Distribution cumulative measurement. The count and sum cannot decrease, - // if resets then the start_time should also be reset. - MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6 - // Some frameworks implemented Histograms as a summary of observations - // (usually things like request durations and response sizes). While it - // also provides a total count of observations and a sum of all observed - // values, it calculates configurable percentiles over a sliding time - // window. This is not recommended, since it cannot be aggregated. - MetricDescriptor_SUMMARY MetricDescriptor_Type = 7 -) - -var MetricDescriptor_Type_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "GAUGE_INT64", - 2: "GAUGE_DOUBLE", - 3: "GAUGE_DISTRIBUTION", - 4: "CUMULATIVE_INT64", - 5: "CUMULATIVE_DOUBLE", - 6: "CUMULATIVE_DISTRIBUTION", - 7: "SUMMARY", -} - -var MetricDescriptor_Type_value = map[string]int32{ - "UNSPECIFIED": 0, - "GAUGE_INT64": 1, - "GAUGE_DOUBLE": 2, - "GAUGE_DISTRIBUTION": 3, - "CUMULATIVE_INT64": 4, - "CUMULATIVE_DOUBLE": 5, - "CUMULATIVE_DISTRIBUTION": 6, - "SUMMARY": 7, -} - -func (x MetricDescriptor_Type) String() string { - return proto.EnumName(MetricDescriptor_Type_name, int32(x)) -} - -func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{1, 0} -} - -// Defines a Metric which has one or more timeseries. -type Metric struct { - // The descriptor of the Metric. - // TODO(issue #152): consider only sending the name of descriptor for - // optimization. - MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` - // One or more timeseries for a single metric, where each timeseries has - // one or more points. - Timeseries []*TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries,omitempty"` - // The resource for the metric. If unset, it may be set to a default value - // provided for a sequence of messages in an RPC stream. 
- Resource *v1.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{0} -} - -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func (m *Metric) GetMetricDescriptor() *MetricDescriptor { - if m != nil { - return m.MetricDescriptor - } - return nil -} - -func (m *Metric) GetTimeseries() []*TimeSeries { - if m != nil { - return m.Timeseries - } - return nil -} - -func (m *Metric) GetResource() *v1.Resource { - if m != nil { - return m.Resource - } - return nil -} - -// Defines a metric type and its schema. -type MetricDescriptor struct { - // The metric type, including its DNS name prefix. It must be unique. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // A detailed description of the metric, which can be used in documentation. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - // The unit in which the metric value is reported. Follows the format - // described by http://unitsofmeasure.org/ucum.html. - Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` - Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"` - // The label keys associated with the metric descriptor. 
- LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } -func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } -func (*MetricDescriptor) ProtoMessage() {} -func (*MetricDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{1} -} - -func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b) -} -func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) -} -func (m *MetricDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricDescriptor.Merge(m, src) -} -func (m *MetricDescriptor) XXX_Size() int { - return xxx_messageInfo_MetricDescriptor.Size(m) -} -func (m *MetricDescriptor) XXX_DiscardUnknown() { - xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo - -func (m *MetricDescriptor) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *MetricDescriptor) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *MetricDescriptor) GetUnit() string { - if m != nil { - return m.Unit - } - return "" -} - -func (m *MetricDescriptor) GetType() MetricDescriptor_Type { - if m != nil { - return m.Type - } - return MetricDescriptor_UNSPECIFIED -} - -func (m *MetricDescriptor) GetLabelKeys() []*LabelKey { - if m != nil { - return m.LabelKeys - } - return nil -} - -// Defines a label key associated with a metric descriptor. -type LabelKey struct { - // The key for the label. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // A human-readable description of what this label key represents. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelKey) Reset() { *m = LabelKey{} } -func (m *LabelKey) String() string { return proto.CompactTextString(m) } -func (*LabelKey) ProtoMessage() {} -func (*LabelKey) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{2} -} - -func (m *LabelKey) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelKey.Unmarshal(m, b) -} -func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic) -} -func (m *LabelKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelKey.Merge(m, src) -} -func (m *LabelKey) XXX_Size() int { - return xxx_messageInfo_LabelKey.Size(m) -} -func (m *LabelKey) XXX_DiscardUnknown() { - xxx_messageInfo_LabelKey.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelKey proto.InternalMessageInfo - -func (m *LabelKey) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *LabelKey) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -// A collection of data points that describes the time-varying values -// of a metric. -type TimeSeries struct { - // Must be present for cumulative metrics. The time when the cumulative value - // was reset to zero. Exclusive. 
The cumulative value is over the time interval - // (start_timestamp, timestamp]. If not specified, the backend can use the - // previous recorded value. - StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` - // The set of label values that uniquely identify this timeseries. Applies to - // all points. The order of label values must match that of label keys in the - // metric descriptor. - LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` - // The data points of this timeseries. Point.value type MUST match the - // MetricDescriptor.type. - Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TimeSeries) Reset() { *m = TimeSeries{} } -func (m *TimeSeries) String() string { return proto.CompactTextString(m) } -func (*TimeSeries) ProtoMessage() {} -func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{3} -} - -func (m *TimeSeries) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TimeSeries.Unmarshal(m, b) -} -func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) -} -func (m *TimeSeries) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeries.Merge(m, src) -} -func (m *TimeSeries) XXX_Size() int { - return xxx_messageInfo_TimeSeries.Size(m) -} -func (m *TimeSeries) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeries.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeries proto.InternalMessageInfo - -func (m *TimeSeries) GetStartTimestamp() *timestamp.Timestamp { - if m != nil { - return m.StartTimestamp - } - return nil -} - -func (m *TimeSeries) GetLabelValues() []*LabelValue { - if m != nil { - return m.LabelValues - } - return nil -} - -func (m *TimeSeries) GetPoints() []*Point { - if m != nil { - return m.Points - } - return nil -} - -type LabelValue struct { - // The value for the label. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - // If false the value field is ignored and considered not set. - // This is used to differentiate a missing label from an empty string. 
- HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelValue) Reset() { *m = LabelValue{} } -func (m *LabelValue) String() string { return proto.CompactTextString(m) } -func (*LabelValue) ProtoMessage() {} -func (*LabelValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{4} -} - -func (m *LabelValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelValue.Unmarshal(m, b) -} -func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic) -} -func (m *LabelValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValue.Merge(m, src) -} -func (m *LabelValue) XXX_Size() int { - return xxx_messageInfo_LabelValue.Size(m) -} -func (m *LabelValue) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValue.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValue proto.InternalMessageInfo - -func (m *LabelValue) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func (m *LabelValue) GetHasValue() bool { - if m != nil { - return m.HasValue - } - return false -} - -// A timestamped measurement. -type Point struct { - // The moment when this point was recorded. Inclusive. - // If not specified, the timestamp will be decided by the backend. - Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // The actual point value. - // - // Types that are valid to be assigned to Value: - // *Point_Int64Value - // *Point_DoubleValue - // *Point_DistributionValue - // *Point_SummaryValue - Value isPoint_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Point) Reset() { *m = Point{} } -func (m *Point) String() string { return proto.CompactTextString(m) } -func (*Point) ProtoMessage() {} -func (*Point) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{5} -} - -func (m *Point) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Point.Unmarshal(m, b) -} -func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Point.Marshal(b, m, deterministic) -} -func (m *Point) XXX_Merge(src proto.Message) { - xxx_messageInfo_Point.Merge(m, src) -} -func (m *Point) XXX_Size() int { - return xxx_messageInfo_Point.Size(m) -} -func (m *Point) XXX_DiscardUnknown() { - xxx_messageInfo_Point.DiscardUnknown(m) -} - -var xxx_messageInfo_Point proto.InternalMessageInfo - -func (m *Point) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -type isPoint_Value interface { - isPoint_Value() -} - -type Point_Int64Value struct { - Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` -} - -type Point_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` -} - -type Point_DistributionValue struct { - DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"` -} - -type Point_SummaryValue struct { - SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"` -} - -func (*Point_Int64Value) isPoint_Value() {} - -func (*Point_DoubleValue) 
isPoint_Value() {} - -func (*Point_DistributionValue) isPoint_Value() {} - -func (*Point_SummaryValue) isPoint_Value() {} - -func (m *Point) GetValue() isPoint_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *Point) GetInt64Value() int64 { - if x, ok := m.GetValue().(*Point_Int64Value); ok { - return x.Int64Value - } - return 0 -} - -func (m *Point) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*Point_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *Point) GetDistributionValue() *DistributionValue { - if x, ok := m.GetValue().(*Point_DistributionValue); ok { - return x.DistributionValue - } - return nil -} - -func (m *Point) GetSummaryValue() *SummaryValue { - if x, ok := m.GetValue().(*Point_SummaryValue); ok { - return x.SummaryValue - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Point) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Point_Int64Value)(nil), - (*Point_DoubleValue)(nil), - (*Point_DistributionValue)(nil), - (*Point_SummaryValue)(nil), - } -} - -// Distribution contains summary statistics for a population of values. It -// optionally contains a histogram representing the distribution of those -// values across a set of buckets. -type DistributionValue struct { - // The number of values in the population. Must be non-negative. This value - // must equal the sum of the values in bucket_counts if a histogram is - // provided. - Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - // The sum of the values in the population. If count is zero then this field - // must be zero. - Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"` - // The sum of squared deviations from the mean of the values in the - // population. For values x_i this is: - // - // Sum[i=1..n]((x_i - mean)^2) - // - // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition - // describes Welford's method for accumulating this sum in one pass. - // - // If count is zero then this field must be zero. - SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` - // Don't change bucket boundaries within a TimeSeries if your backend doesn't - // support this. - // TODO(issue #152): consider not required to send bucket options for - // optimization. - BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` - // If the distribution does not have a histogram, then omit this field. - // If there is a histogram, then the sum of the values in the Bucket counts - // must equal the value in the count field of the distribution. 
- Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue) Reset() { *m = DistributionValue{} } -func (m *DistributionValue) String() string { return proto.CompactTextString(m) } -func (*DistributionValue) ProtoMessage() {} -func (*DistributionValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6} -} - -func (m *DistributionValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue.Unmarshal(m, b) -} -func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic) -} -func (m *DistributionValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue.Merge(m, src) -} -func (m *DistributionValue) XXX_Size() int { - return xxx_messageInfo_DistributionValue.Size(m) -} -func (m *DistributionValue) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue proto.InternalMessageInfo - -func (m *DistributionValue) GetCount() int64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *DistributionValue) GetSum() float64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *DistributionValue) GetSumOfSquaredDeviation() float64 { - if m != nil { - return m.SumOfSquaredDeviation - } - return 0 -} - -func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions { - if m != nil { - return m.BucketOptions - } - return nil -} - -func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket { - if m != nil { - return m.Buckets - } - return nil -} - -// A Distribution may optionally contain a histogram of the values in the -// population. The bucket boundaries for that histogram are described by -// BucketOptions. -// -// If bucket_options has no type, then there is no histogram associated with -// the Distribution. 
-type DistributionValue_BucketOptions struct { - // Types that are valid to be assigned to Type: - // *DistributionValue_BucketOptions_Explicit_ - Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_BucketOptions) Reset() { *m = DistributionValue_BucketOptions{} } -func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_BucketOptions) ProtoMessage() {} -func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 0} -} - -func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b) -} -func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic) -} -func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src) -} -func (m *DistributionValue_BucketOptions) XXX_Size() int { - return xxx_messageInfo_DistributionValue_BucketOptions.Size(m) -} -func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo - -type isDistributionValue_BucketOptions_Type interface { - isDistributionValue_BucketOptions_Type() -} - -type DistributionValue_BucketOptions_Explicit_ struct { - Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"` -} - -func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {} - -func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type { - if m != nil { - return m.Type - } - return nil -} - -func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit { - if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok { - return x.Explicit - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*DistributionValue_BucketOptions) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*DistributionValue_BucketOptions_Explicit_)(nil), - } -} - -// Specifies a set of buckets with arbitrary upper-bounds. -// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket -// index i are: -// -// [0, bucket_bounds[i]) for i == 0 -// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1 -// [bucket_bounds[i], +infinity) for i == N-1 -type DistributionValue_BucketOptions_Explicit struct { - // The values must be strictly increasing and > 0. 
- Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_BucketOptions_Explicit) Reset() { - *m = DistributionValue_BucketOptions_Explicit{} -} -func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {} -func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0} -} - -func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int { - return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo - -func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 { - if m != nil { - return m.Bounds - } - return nil -} - -type DistributionValue_Bucket struct { - // The number of values in each bucket of the histogram, as described in - // bucket_bounds. - Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - // If the distribution does not have a histogram, then omit this field. 
- Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} } -func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_Bucket) ProtoMessage() {} -func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 1} -} - -func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b) -} -func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic) -} -func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_Bucket.Merge(m, src) -} -func (m *DistributionValue_Bucket) XXX_Size() int { - return xxx_messageInfo_DistributionValue_Bucket.Size(m) -} -func (m *DistributionValue_Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo - -func (m *DistributionValue_Bucket) GetCount() int64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar { - if m != nil { - return m.Exemplar - } - return nil -} - -// Exemplars are example points that may be used to annotate aggregated -// Distribution values. They are metadata that gives information about a -// particular value added to a Distribution bucket. -type DistributionValue_Exemplar struct { - // Value of the exemplar point. It determines which bucket the exemplar - // belongs to. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - // The observation (sampling) time of the above value. - Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Contextual information about the example value. 
- Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} } -func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_Exemplar) ProtoMessage() {} -func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 2} -} - -func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b) -} -func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic) -} -func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src) -} -func (m *DistributionValue_Exemplar) XXX_Size() int { - return xxx_messageInfo_DistributionValue_Exemplar.Size(m) -} -func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo - -func (m *DistributionValue_Exemplar) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -func (m *DistributionValue_Exemplar) GetAttachments() map[string]string { - if m != nil { - return m.Attachments - } - return nil -} - -// The start_timestamp only applies to the count and sum in the SummaryValue. -type SummaryValue struct { - // The total number of recorded values since start_time. Optional since - // some systems don't expose this. - Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` - // The total sum of recorded values since start_time. Optional since some - // systems don't expose this. If count is zero then this field must be zero. - // This field must be unset if the sum is not available. - Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` - // Values calculated over an arbitrary time window. 
- Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SummaryValue) Reset() { *m = SummaryValue{} } -func (m *SummaryValue) String() string { return proto.CompactTextString(m) } -func (*SummaryValue) ProtoMessage() {} -func (*SummaryValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{7} -} - -func (m *SummaryValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SummaryValue.Unmarshal(m, b) -} -func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic) -} -func (m *SummaryValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryValue.Merge(m, src) -} -func (m *SummaryValue) XXX_Size() int { - return xxx_messageInfo_SummaryValue.Size(m) -} -func (m *SummaryValue) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryValue.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryValue proto.InternalMessageInfo - -func (m *SummaryValue) GetCount() *wrappers.Int64Value { - if m != nil { - return m.Count - } - return nil -} - -func (m *SummaryValue) GetSum() *wrappers.DoubleValue { - if m != nil { - return m.Sum - } - return nil -} - -func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -// The values in this message can be reset at arbitrary unknown times, with -// the requirement that all of them are reset at the same time. -type SummaryValue_Snapshot struct { - // The number of values in the snapshot. Optional since some systems don't - // expose this. - Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` - // The sum of values in the snapshot. Optional since some systems don't - // expose this. If count is zero then this field must be zero or not set - // (if not supported). - Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` - // A list of values at different percentiles of the distribution calculated - // from the current snapshot. The percentiles must be strictly increasing. 
- PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} } -func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) } -func (*SummaryValue_Snapshot) ProtoMessage() {} -func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{7, 0} -} - -func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b) -} -func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic) -} -func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src) -} -func (m *SummaryValue_Snapshot) XXX_Size() int { - return xxx_messageInfo_SummaryValue_Snapshot.Size(m) -} -func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo - -func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value { - if m != nil { - return m.Count - } - return nil -} - -func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue { - if m != nil { - return m.Sum - } - return nil -} - -func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile { - if m != nil { - return m.PercentileValues - } - return nil -} - -// Represents the value at a given percentile of a distribution. -type SummaryValue_Snapshot_ValueAtPercentile struct { - // The percentile of a distribution. Must be in the interval - // (0.0, 100.0]. - Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"` - // The value at the given percentile of a distribution. 
- Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() { - *m = SummaryValue_Snapshot_ValueAtPercentile{} -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) } -func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {} -func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0} -} - -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int { - return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo - -func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 { - if m != nil { - return m.Percentile - } - return 0 -} - -func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func init() { - proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value) - proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric") - proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor") - proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey") - proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries") - proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue") - proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point") - proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue") - proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions") - proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit") - proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket") - proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar") - proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry") - proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue") - proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot") - proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile") -} - -func init() { - 
proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a) -} - -var fileDescriptor_0ee3deb72053811a = []byte{ - // 1098 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5, - 0x17, 0xcf, 0xda, 0x8e, 0xe3, 0x9c, 0x75, 0xdb, 0xf5, 0xa8, 0xed, 0xdf, 0xda, 0xfc, 0x15, 0xc2, - 0x22, 0x20, 0x15, 0xca, 0x5a, 0x31, 0xa5, 0xad, 0x2a, 0x54, 0x14, 0xc7, 0x6e, 0x62, 0xc8, 0x87, - 0x35, 0xb6, 0x2b, 0xd1, 0x1b, 0x6b, 0xbd, 0x9e, 0x24, 0x4b, 0xbc, 0x1f, 0xdd, 0x99, 0x35, 0xf8, - 0x05, 0x78, 0x04, 0xae, 0xb9, 0x45, 0x3c, 0x07, 0x57, 0x3c, 0x01, 0x4f, 0x81, 0x78, 0x03, 0xb4, - 0x33, 0xb3, 0x1f, 0x89, 0xc1, 0xd4, 0x45, 0xe2, 0xee, 0x9c, 0x33, 0xe7, 0xfc, 0xfc, 0x3b, 0x9f, - 0x5e, 0x78, 0xe4, 0x07, 0xc4, 0xb3, 0x89, 0x47, 0x23, 0xda, 0x08, 0x42, 0x9f, 0xf9, 0x0d, 0x97, - 0xb0, 0xd0, 0xb1, 0x69, 0x63, 0xb6, 0x9f, 0x88, 0x26, 0x7f, 0x40, 0x5b, 0x99, 0xab, 0xb0, 0x98, - 0xc9, 0xfb, 0x6c, 0x5f, 0x7f, 0xef, 0xd2, 0xf7, 0x2f, 0xa7, 0x44, 0x60, 0x8c, 0xa3, 0x8b, 0x06, - 0x73, 0x5c, 0x42, 0x99, 0xe5, 0x06, 0xc2, 0x57, 0xdf, 0xbe, 0xed, 0xf0, 0x6d, 0x68, 0x05, 0x01, - 0x09, 0x25, 0x96, 0xfe, 0xc9, 0x02, 0x91, 0x90, 0x50, 0x3f, 0x0a, 0x6d, 0x12, 0x33, 0x49, 0x64, - 0xe1, 0x6c, 0xfc, 0xa1, 0x40, 0xf9, 0x94, 0xff, 0x38, 0x7a, 0x0d, 0x35, 0x41, 0x63, 0x34, 0x21, - 0xd4, 0x0e, 0x9d, 0x80, 0xf9, 0x61, 0x5d, 0xd9, 0x51, 0x76, 0xd5, 0xe6, 0x9e, 0xb9, 0x84, 0xb1, - 0x29, 0xe2, 0xdb, 0x69, 0x10, 0xd6, 0xdc, 0x5b, 0x16, 0x74, 0x04, 0xc0, 0xd3, 0x20, 0xa1, 0x43, - 0x68, 0xbd, 0xb0, 0x53, 0xdc, 0x55, 0x9b, 0x1f, 0x2f, 0x05, 0x1d, 0x38, 0x2e, 0xe9, 0x73, 0x77, - 0x9c, 0x0b, 0x45, 0x2d, 0xa8, 0x24, 0x19, 0xd4, 0x8b, 0x9c, 0xdb, 0x47, 0x8b, 0x30, 0x69, 0x8e, - 0xb3, 0x7d, 0x13, 0x4b, 0x19, 0xa7, 0x71, 0xc6, 0x0f, 0x45, 0xd0, 0x6e, 0x73, 0x46, 0x08, 0x4a, - 0x9e, 0xe5, 0x12, 0x9e, 0xf0, 0x26, 0xe6, 0x32, 0xda, 0x01, 0x35, 0x29, 0x85, 0xe3, 0x7b, 0xf5, - 0x02, 0x7f, 0xca, 0x9b, 0xe2, 0xa8, 0xc8, 0x73, 0x18, 0xa7, 0xb2, 0x89, 0xb9, 0x8c, 0x5e, 0x42, - 0x89, 0xcd, 0x03, 0x52, 0x2f, 0xed, 0x28, 0xbb, 0x77, 0x9b, 0xcd, 0x95, 0x4a, 0x67, 0x0e, 0xe6, - 0x01, 0xc1, 0x3c, 0x1e, 0xb5, 0x01, 0xa6, 0xd6, 0x98, 0x4c, 0x47, 0xd7, 0x64, 0x4e, 0xeb, 0xeb, - 0xbc, 0x66, 0x1f, 0x2e, 0x45, 0x3b, 0x89, 0xdd, 0xbf, 0x22, 0x73, 0xbc, 0x39, 0x95, 0x12, 0x35, - 0x7e, 0x52, 0xa0, 0x14, 0x83, 0xa2, 0x7b, 0xa0, 0x0e, 0xcf, 0xfa, 0xbd, 0xce, 0x61, 0xf7, 0x65, - 0xb7, 0xd3, 0xd6, 0xd6, 0x62, 0xc3, 0xd1, 0xc1, 0xf0, 0xa8, 0x33, 0xea, 0x9e, 0x0d, 0x9e, 0x3c, - 0xd6, 0x14, 0xa4, 0x41, 0x55, 0x18, 0xda, 0xe7, 0xc3, 0xd6, 0x49, 0x47, 0x2b, 0xa0, 0x87, 0x80, - 0xa4, 0xa5, 0xdb, 0x1f, 0xe0, 0x6e, 0x6b, 0x38, 0xe8, 0x9e, 0x9f, 0x69, 0x45, 0x74, 0x1f, 0xb4, - 0xc3, 0xe1, 0xe9, 0xf0, 0xe4, 0x60, 0xd0, 0x7d, 0x95, 0xc4, 0x97, 0xd0, 0x03, 0xa8, 0xe5, 0xac, - 0x12, 0x64, 0x1d, 0x6d, 0xc1, 0xff, 0xf2, 0xe6, 0x3c, 0x52, 0x19, 0xa9, 0xb0, 0xd1, 0x1f, 0x9e, - 0x9e, 0x1e, 0xe0, 0xaf, 0xb5, 0x0d, 0xe3, 0x05, 0x54, 0x92, 0x14, 0x90, 0x06, 0xc5, 0x6b, 0x32, - 0x97, 0xed, 0x88, 0xc5, 0x7f, 0xee, 0x86, 0xf1, 0x9b, 0x02, 0x90, 0xcd, 0x0d, 0x3a, 0x84, 0x7b, - 0x94, 0x59, 0x21, 0x1b, 0xa5, 0x1b, 0x24, 0xc7, 0x59, 0x37, 0xc5, 0x0a, 0x99, 0xc9, 0x0a, 0xf1, - 0x69, 0xe3, 0x1e, 0xf8, 0x2e, 0x0f, 0x49, 0x75, 0xf4, 0x25, 0x54, 0x45, 0x17, 0x66, 0xd6, 0x34, - 0x7a, 0xcb, 0xd9, 0xe5, 0x49, 0xbc, 0x8a, 0xfd, 0xb1, 0x3a, 0x4d, 0x65, 0x8a, 0x9e, 0x43, 0x39, - 0xf0, 0x1d, 0x8f, 0xd1, 0x7a, 0x91, 0xa3, 0x18, 0x4b, 0x51, 0x7a, 0xb1, 0x2b, 0x96, 0x11, 0xc6, - 0x17, 0x00, 0x19, 
0x2c, 0xba, 0x0f, 0xeb, 0x9c, 0x8f, 0xac, 0x8f, 0x50, 0xd0, 0x16, 0x6c, 0x5e, - 0x59, 0x54, 0x30, 0xe5, 0xf5, 0xa9, 0xe0, 0xca, 0x95, 0x45, 0x79, 0x88, 0xf1, 0x4b, 0x01, 0xd6, - 0x39, 0x24, 0x7a, 0x06, 0x9b, 0xab, 0x54, 0x24, 0x73, 0x46, 0xef, 0x83, 0xea, 0x78, 0xec, 0xc9, - 0xe3, 0xdc, 0x4f, 0x14, 0x8f, 0xd7, 0x30, 0x70, 0xa3, 0x60, 0xf6, 0x01, 0x54, 0x27, 0x7e, 0x34, - 0x9e, 0x12, 0xe9, 0x13, 0x6f, 0x86, 0x72, 0xbc, 0x86, 0x55, 0x61, 0x15, 0x4e, 0x23, 0x40, 0x13, - 0x87, 0xb2, 0xd0, 0x19, 0x47, 0x71, 0xe3, 0xa4, 0x6b, 0x89, 0x53, 0x31, 0x97, 0x16, 0xa5, 0x9d, - 0x0b, 0xe3, 0x58, 0xc7, 0x6b, 0xb8, 0x36, 0xb9, 0x6d, 0x44, 0x3d, 0xb8, 0x43, 0x23, 0xd7, 0xb5, - 0xc2, 0xb9, 0xc4, 0x5e, 0xe7, 0xd8, 0x8f, 0x96, 0x62, 0xf7, 0x45, 0x44, 0x02, 0x5b, 0xa5, 0x39, - 0xbd, 0xb5, 0x21, 0x2b, 0x6e, 0xfc, 0x5a, 0x86, 0xda, 0x02, 0x8b, 0xb8, 0x21, 0xb6, 0x1f, 0x79, - 0x8c, 0xd7, 0xb3, 0x88, 0x85, 0x12, 0x0f, 0x31, 0x8d, 0x5c, 0x5e, 0x27, 0x05, 0xc7, 0x22, 0x7a, - 0x0a, 0x75, 0x1a, 0xb9, 0x23, 0xff, 0x62, 0x44, 0xdf, 0x44, 0x56, 0x48, 0x26, 0xa3, 0x09, 0x99, - 0x39, 0x16, 0x9f, 0x68, 0x5e, 0x2a, 0xfc, 0x80, 0x46, 0xee, 0xf9, 0x45, 0x5f, 0xbc, 0xb6, 0x93, - 0x47, 0x64, 0xc3, 0xdd, 0x71, 0x64, 0x5f, 0x13, 0x36, 0xf2, 0xf9, 0xb0, 0x53, 0x59, 0xae, 0xcf, - 0x57, 0x2b, 0x97, 0xd9, 0xe2, 0x20, 0xe7, 0x02, 0x03, 0xdf, 0x19, 0xe7, 0x55, 0x74, 0x0e, 0x1b, - 0xc2, 0x90, 0xdc, 0x9b, 0xcf, 0xde, 0x09, 0x1d, 0x27, 0x28, 0xfa, 0x8f, 0x0a, 0xdc, 0xb9, 0xf1, - 0x8b, 0xc8, 0x86, 0x0a, 0xf9, 0x2e, 0x98, 0x3a, 0xb6, 0xc3, 0xe4, 0xec, 0x75, 0xfe, 0x4d, 0x06, - 0x66, 0x47, 0x82, 0x1d, 0xaf, 0xe1, 0x14, 0x58, 0x37, 0xa0, 0x92, 0xd8, 0xd1, 0x43, 0x28, 0x8f, - 0xfd, 0xc8, 0x9b, 0xd0, 0xba, 0xb2, 0x53, 0xdc, 0x55, 0xb0, 0xd4, 0x5a, 0x65, 0x71, 0xa6, 0x75, - 0x0a, 0x65, 0x81, 0xf8, 0x37, 0x3d, 0xec, 0xc7, 0x84, 0x89, 0x1b, 0x4c, 0xad, 0x90, 0x37, 0x52, - 0x6d, 0x3e, 0x5d, 0x91, 0x70, 0x47, 0x86, 0xe3, 0x14, 0x48, 0xff, 0xbe, 0x10, 0x33, 0x14, 0xca, - 0xcd, 0x65, 0x56, 0x92, 0x65, 0xbe, 0xb1, 0xa5, 0x85, 0x55, 0xb6, 0xf4, 0x1b, 0x50, 0x2d, 0xc6, - 0x2c, 0xfb, 0xca, 0x25, 0xd9, 0xad, 0x39, 0x7e, 0x47, 0xd2, 0xe6, 0x41, 0x06, 0xd5, 0xf1, 0x58, - 0x38, 0xc7, 0x79, 0x70, 0xfd, 0x05, 0x68, 0xb7, 0x1d, 0xfe, 0xe2, 0x74, 0xa7, 0x19, 0x16, 0x72, - 0xe7, 0xea, 0x79, 0xe1, 0x99, 0x62, 0xfc, 0x5e, 0x84, 0x6a, 0x7e, 0xef, 0xd0, 0x7e, 0xbe, 0x09, - 0x6a, 0x73, 0x6b, 0x21, 0xe5, 0x6e, 0x7a, 0x6b, 0x92, 0x0e, 0x99, 0xd9, 0x96, 0xa9, 0xcd, 0xff, - 0x2f, 0x04, 0xb4, 0xb3, 0xc3, 0x23, 0x76, 0xf0, 0x0c, 0x2a, 0xd4, 0xb3, 0x02, 0x7a, 0xe5, 0x33, - 0xf9, 0x0d, 0xd1, 0x7c, 0xeb, 0xbb, 0x60, 0xf6, 0x65, 0x24, 0x4e, 0x31, 0xf4, 0x9f, 0x0b, 0x50, - 0x49, 0xcc, 0xff, 0x05, 0xff, 0x37, 0x50, 0x0b, 0x48, 0x68, 0x13, 0x8f, 0x39, 0xc9, 0x99, 0x4d, - 0xba, 0xdc, 0x5e, 0x3d, 0x11, 0x93, 0xab, 0x07, 0xac, 0x97, 0x42, 0x62, 0x2d, 0x83, 0x17, 0xff, - 0x5c, 0x7a, 0x17, 0x6a, 0x0b, 0x6e, 0x68, 0x1b, 0x20, 0x73, 0x94, 0xc3, 0x9b, 0xb3, 0xdc, 0xec, - 0x7a, 0x32, 0xd7, 0xad, 0x19, 0x6c, 0x3b, 0xfe, 0x32, 0x9a, 0xad, 0xaa, 0xf8, 0x2a, 0xa2, 0xbd, - 0xf8, 0xa1, 0xa7, 0xbc, 0x6e, 0x5f, 0x3a, 0xec, 0x2a, 0x1a, 0x9b, 0xb6, 0xef, 0x36, 0x44, 0xcc, - 0x9e, 0xe3, 0x51, 0x16, 0x46, 0xf1, 0xcc, 0xf1, 0xeb, 0xd8, 0xc8, 0xe0, 0xf6, 0xc4, 0x27, 0xef, - 0x25, 0xf1, 0xf6, 0x2e, 0xf3, 0x9f, 0xe0, 0xe3, 0x32, 0x7f, 0xf8, 0xf4, 0xcf, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x8e, 0xfc, 0xd7, 0x46, 0xa8, 0x0b, 0x00, 0x00, -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go 
b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go index 38faa9fdf1..560dbd94a0 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go @@ -18,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // Resource information. type Resource struct { diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go index 4de05355a4..c34e388922 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go @@ -5,7 +5,6 @@ package v1 import ( fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" proto "github.com/golang/protobuf/proto" timestamp "github.com/golang/protobuf/ptypes/timestamp" wrappers "github.com/golang/protobuf/ptypes/wrappers" @@ -21,7 +20,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // Type of span. Can be used to specify additional relationships between spans // in addition to a parent/child relationship. @@ -125,31 +124,22 @@ func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { } // A span represents a single operation within a trace. Spans can be -// nested to form a trace tree. Spans may also be linked to other spans -// from the same or different trace. And form graphs. Often, a trace -// contains a root span that describes the end-to-end latency, and one -// or more subspans for its sub-operations. A trace can also contain -// multiple root spans, or none at all. Spans do not need to be -// contiguous - there may be gaps or overlaps between spans in a trace. +// nested to form a trace tree. Often, a trace contains a root span +// that describes the end-to-end latency, and one or more subspans for +// its sub-operations. A trace can also contain multiple root spans, +// or none at all. Spans do not need to be contiguous - there may be +// gaps or overlaps between spans in a trace. // -// The next id is 17. +// The next id is 16. // TODO(bdrutu): Add an example. type Span struct { // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes - // is considered invalid. - // - // This field is semantically required. Receiver should generate new - // random trace_id if empty or invalid trace_id was received. + // the same `trace_id`. The ID is a 16-byte array. // // This field is required. TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` // A unique identifier for a span within a trace, assigned when the span - // is created. 
The ID is an 8-byte array. An ID with all zeroes is considered - // invalid. - // - // This field is semantically required. Receiver should generate new - // random span_id if empty or invalid span_id was received. + // is created. The ID is an 8-byte array. // // This field is required. SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` @@ -165,37 +155,19 @@ type Span struct { // the same display name at the same call point in an application. // This makes it easier to correlate spans in different traces. // - // This field is semantically required to be set to non-empty string. - // When null or empty string received - receiver may use string "name" - // as a replacement. There might be smarted algorithms implemented by - // receiver to fix the empty span name. - // // This field is required. Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. + // two spans with the same name may be distinguished using `CLIENT` + // and `SERVER` to identify queueing latency associated with the span. Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` // The start time of the span. On the client side, this is the time kept by // the local machine where the span execution starts. On the server side, this // is the time when the server's application handler starts running. - // - // This field is semantically required. When not set on receive - - // receiver should set it to the value of end_time field if it was - // set. Or to the current time if neither was set. It is important to - // keep end_time > start_time for consistency. - // - // This field is required. StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` // The end time of the span. On the client side, this is the time kept by // the local machine where the span execution ends. On the server side, this // is the time when the server application handler stops running. - // - // This field is semantically required. When not set on receive - - // receiver should set it to start_time value. It is important to - // keep end_time > start_time for consistency. - // - // This field is required. EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` // A set of attributes on the span. Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` @@ -205,19 +177,11 @@ type Span struct { TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` // The included links. Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` - // An optional final status for this span. Semantically when Status - // wasn't set it is means span ended without errors and assume - // Status.Ok (code = 0). + // An optional final status for this span. Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` - // An optional resource that is associated with this span. If not set, this span - // should be part of a batch that does include the resource information, unless resource - // information is unknown. 
- Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"` - // A highly recommended but not required flag that identifies when a - // trace crosses a process boundary. True when the parent_span belongs - // to the same process as the current span. This flag is most commonly - // used to indicate the need to adjust time as clocks in different - // processes may not be synchronized. + // A highly recommended but not required flag that identifies when a trace + // crosses a process boundary. True when the parent_span belongs to the + // same process as the current span. SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` // An optional number of child spans that were generated while this span // was active. If set, allows an implementation to detect missing child spans. @@ -343,13 +307,6 @@ func (m *Span) GetStatus() *Status { return nil } -func (m *Span) GetResource() *v1.Resource { - if m != nil { - return m.Resource - } - return nil -} - func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { if m != nil { return m.SameProcessAsParentSpan @@ -463,14 +420,13 @@ func (m *Span_Tracestate_Entry) GetValue() string { // A set of attributes, each with a key and a value. type Span_Attributes struct { - // The set of attributes. The value can be a string, an integer, a double - // or the Boolean values `true` or `false`. Note, global attributes like - // server name can be set as tags using resource API. Examples of attributes: + // The set of attributes. The value can be a string, an integer, or the + // Boolean values `true` and `false`. For example: // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/instance_id": "my-instance" + // "/http/user_agent": "" // "/http/server_latency": 300 // "abc.com/myattribute": true - // "abc.com/score": 10.239 AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // The number of attributes that were discarded. Attributes can be discarded // because their keys are too long or because there are too many attributes. @@ -605,14 +561,80 @@ func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} { - return []interface{}{ +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Span_TimeEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Span_TimeEvent_OneofMarshaler, _Span_TimeEvent_OneofUnmarshaler, _Span_TimeEvent_OneofSizer, []interface{}{ (*Span_TimeEvent_Annotation_)(nil), (*Span_TimeEvent_MessageEvent_)(nil), } } +func _Span_TimeEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Span_TimeEvent) + // value + switch x := m.Value.(type) { + case *Span_TimeEvent_Annotation_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Annotation); err != nil { + return err + } + case *Span_TimeEvent_MessageEvent_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MessageEvent); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Span_TimeEvent.Value has unexpected type %T", x) + } + return nil +} + +func _Span_TimeEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Span_TimeEvent) + switch tag { + case 2: // value.annotation + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Span_TimeEvent_Annotation) + err := b.DecodeMessage(msg) + m.Value = &Span_TimeEvent_Annotation_{msg} + return true, err + case 3: // value.message_event + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Span_TimeEvent_MessageEvent) + err := b.DecodeMessage(msg) + m.Value = &Span_TimeEvent_MessageEvent_{msg} + return true, err + default: + return false, nil + } +} + +func _Span_TimeEvent_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Span_TimeEvent) + // value + switch x := m.Value.(type) { + case *Span_TimeEvent_Annotation_: + s := proto.Size(x.Annotation) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Span_TimeEvent_MessageEvent_: + s := proto.Size(x.MessageEvent) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + // A text annotation with a set of attributes. type Span_TimeEvent_Annotation struct { // A user-supplied message describing the event. @@ -804,10 +826,11 @@ func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 { // where a single batch handler processes multiple requests from different // traces or when the handler receives a request from a different project. type Span_Link struct { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - // A unique identifier for the linked span. The ID is an 8-byte array. + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` // The relationship of the current span relative to the linked span. 
Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"` @@ -929,8 +952,7 @@ func (m *Span_Links) GetDroppedLinksCount() int32 { // [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto), // which is used by [gRPC](https://github.com/grpc). type Status struct { - // The status code. This is optional field. It is safe to assume 0 (OK) - // when not set. + // The status code. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` @@ -1081,9 +1103,9 @@ func (m *AttributeValue) GetDoubleValue() float64 { return 0 } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*AttributeValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ +// XXX_OneofFuncs is for the internal use of the proto package. +func (*AttributeValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AttributeValue_OneofMarshaler, _AttributeValue_OneofUnmarshaler, _AttributeValue_OneofSizer, []interface{}{ (*AttributeValue_StringValue)(nil), (*AttributeValue_IntValue)(nil), (*AttributeValue_BoolValue)(nil), @@ -1091,6 +1113,97 @@ func (*AttributeValue) XXX_OneofWrappers() []interface{} { } } +func _AttributeValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AttributeValue) + // value + switch x := m.Value.(type) { + case *AttributeValue_StringValue: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StringValue); err != nil { + return err + } + case *AttributeValue_IntValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IntValue)) + case *AttributeValue_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *AttributeValue_DoubleValue: + b.EncodeVarint(4<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case nil: + default: + return fmt.Errorf("AttributeValue.Value has unexpected type %T", x) + } + return nil +} + +func _AttributeValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AttributeValue) + switch tag { + case 1: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TruncatableString) + err := b.DecodeMessage(msg) + m.Value = &AttributeValue_StringValue{msg} + return true, err + case 2: // value.int_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &AttributeValue_IntValue{int64(x)} + return true, err + case 3: // value.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &AttributeValue_BoolValue{x != 0} + return true, err + case 4: // value.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &AttributeValue_DoubleValue{math.Float64frombits(x)} + return true, err + default: + return false, nil + } +} + +func _AttributeValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AttributeValue) + // value + switch x := m.Value.(type) { + 
case *AttributeValue_StringValue: + s := proto.Size(x.StringValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *AttributeValue_IntValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.IntValue)) + case *AttributeValue_BoolValue: + n += 1 // tag and wire + n += 1 + case *AttributeValue_DoubleValue: + n += 1 // tag and wire + n += 8 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + // The call stack which originated this span. type StackTrace struct { // Stack frames in this stack trace. @@ -1441,103 +1554,101 @@ func init() { } var fileDescriptor_8ea38bbb821bf584 = []byte{ - // 1557 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xeb, 0x52, 0x1b, 0x47, - 0x16, 0x66, 0x74, 0xd7, 0x91, 0x90, 0x45, 0x1b, 0xdb, 0x83, 0xd6, 0xbb, 0x66, 0x65, 0x7b, 0x17, - 0xaf, 0x17, 0x61, 0xb0, 0xd7, 0xe5, 0x6b, 0x79, 0x11, 0x88, 0x48, 0x06, 0x2b, 0x72, 0x4b, 0xa6, - 0x72, 0xa9, 0xd4, 0xd4, 0x48, 0xd3, 0x88, 0x09, 0x52, 0xcf, 0x64, 0xa6, 0x87, 0x14, 0x7e, 0x81, - 0x54, 0x2a, 0xff, 0x52, 0x95, 0xca, 0x0b, 0xe4, 0x47, 0x5e, 0x24, 0x0f, 0x90, 0xca, 0x73, 0xe4, - 0x09, 0xf2, 0x27, 0xd5, 0xdd, 0x73, 0x13, 0xd8, 0xa0, 0xc8, 0x7f, 0xa8, 0x9e, 0xee, 0xf3, 0x7d, - 0x7d, 0x4e, 0x9f, 0x2b, 0x82, 0xdb, 0x96, 0x4d, 0xe8, 0x80, 0x50, 0xd7, 0x73, 0xd7, 0x6c, 0xc7, - 0x62, 0xd6, 0x1a, 0x73, 0xf4, 0x01, 0x59, 0x3b, 0x5e, 0x97, 0x8b, 0x9a, 0xd8, 0x44, 0x4b, 0x91, - 0x98, 0xdc, 0xa9, 0xc9, 0xd3, 0xe3, 0xf5, 0xca, 0xdd, 0x33, 0x0c, 0x0e, 0x71, 0x2d, 0xcf, 0x91, - 0x24, 0xc1, 0x5a, 0xa2, 0x2a, 0x37, 0x86, 0x96, 0x35, 0x1c, 0x11, 0x29, 0xd8, 0xf7, 0x0e, 0xd6, - 0x98, 0x39, 0x26, 0x2e, 0xd3, 0xc7, 0xb6, 0x2f, 0xf0, 0x8f, 0xd3, 0x02, 0x5f, 0x3b, 0xba, 0x6d, - 0x13, 0xc7, 0xbf, 0xb6, 0xfa, 0xcb, 0x15, 0x48, 0x75, 0x6d, 0x9d, 0xa2, 0x25, 0xc8, 0x09, 0x15, - 0x34, 0xd3, 0x50, 0x95, 0x65, 0x65, 0xa5, 0x88, 0xb3, 0xe2, 0xbb, 0x65, 0xa0, 0x6b, 0x90, 0x75, - 0x6d, 0x9d, 0xf2, 0x93, 0x84, 0x38, 0xc9, 0xf0, 0xcf, 0x96, 0x81, 0x5e, 0x02, 0x08, 0x19, 0x97, - 0xe9, 0x8c, 0xa8, 0x97, 0x96, 0x95, 0x95, 0xc2, 0xc6, 0x7f, 0x6a, 0xef, 0x35, 0xad, 0xc6, 0x2f, - 0xaa, 0xf5, 0x42, 0x04, 0x8e, 0xa1, 0xd1, 0x2d, 0x28, 0xd9, 0xba, 0x43, 0x28, 0xd3, 0x82, 0xbb, - 0x92, 0xe2, 0xae, 0xa2, 0xdc, 0xed, 0xca, 0x1b, 0xff, 0x0f, 0x29, 0xaa, 0x8f, 0x89, 0x9a, 0x12, - 0x77, 0xfd, 0xf7, 0x9c, 0xbb, 0x7a, 0x8e, 0x47, 0x07, 0x3a, 0xd3, 0xfb, 0x23, 0xd2, 0x65, 0x8e, - 0x49, 0x87, 0x58, 0x20, 0xd1, 0x33, 0x48, 0x1d, 0x99, 0xd4, 0x50, 0x4b, 0xcb, 0xca, 0x4a, 0x69, - 0x63, 0xe5, 0x22, 0x6d, 0xf9, 0x9f, 0x5d, 0x93, 0x1a, 0x58, 0xa0, 0xd0, 0x63, 0x00, 0x97, 0xe9, - 0x0e, 0xd3, 0xf8, 0x3b, 0xab, 0x69, 0xa1, 0x45, 0xa5, 0x26, 0xdf, 0xb8, 0x16, 0xbc, 0x71, 0xad, - 0x17, 0x38, 0x01, 0xe7, 0x85, 0x34, 0xff, 0x46, 0xff, 0x83, 0x1c, 0xa1, 0x86, 0x04, 0x66, 0x2e, - 0x04, 0x66, 0x09, 0x35, 0x04, 0xec, 0x25, 0x80, 0xce, 0x98, 0x63, 0xf6, 0x3d, 0x46, 0x5c, 0x35, - 0x3b, 0xdd, 0x1b, 0x6f, 0x86, 0x08, 0x1c, 0x43, 0xa3, 0x1d, 0x28, 0xb8, 0x4c, 0x1f, 0x1c, 0x69, - 0x42, 0x5a, 0xcd, 0x09, 0xb2, 0xdb, 0xe7, 0x91, 0x71, 0x69, 0xe1, 0x30, 0x0c, 0x6e, 0xb8, 0x46, - 0xbb, 0x50, 0xe0, 0x66, 0x68, 0xe4, 0x98, 0x50, 0xe6, 0xaa, 0xf9, 0x29, 0x1d, 0x6f, 0x8e, 0x49, - 0x43, 0x20, 0x30, 0xb0, 0x70, 0x8d, 0x9e, 0x42, 0x7a, 0x64, 0xd2, 0x23, 0x57, 0x85, 0x8b, 0xd5, - 0xe1, 0x34, 0x7b, 0x5c, 0x18, 0x4b, 0x0c, 0x7a, 0x0c, 0x19, 0x1e, 0x3e, 0x9e, 0xab, 0x16, 0x04, - 0xfa, 0x9f, 0xe7, 0x1b, 0xc3, 0x3c, 0x17, 0xfb, 0x00, 0x54, 0x87, 0x5c, 0x90, 
0x4c, 0x6a, 0x59, - 0x80, 0xff, 0x75, 0x16, 0x1c, 0xa6, 0xdb, 0xf1, 0x7a, 0x0d, 0xfb, 0x6b, 0x1c, 0xe2, 0xd0, 0x27, - 0xf0, 0x37, 0x57, 0x1f, 0x13, 0xcd, 0x76, 0xac, 0x01, 0x71, 0x5d, 0x4d, 0x77, 0xb5, 0x58, 0x10, - 0xab, 0xc5, 0xf7, 0xb8, 0xb9, 0x6e, 0x59, 0xa3, 0x7d, 0x7d, 0xe4, 0x11, 0x7c, 0x8d, 0xc3, 0x3b, - 0x12, 0xbd, 0xe9, 0x76, 0xc2, 0x50, 0x47, 0x3b, 0x50, 0x1e, 0x1c, 0x9a, 0x23, 0x43, 0x66, 0xc3, - 0xc0, 0xf2, 0x28, 0x53, 0xe7, 0x05, 0xdd, 0xf5, 0x33, 0x74, 0x6f, 0x5a, 0x94, 0xdd, 0xdf, 0x90, - 0x84, 0x25, 0x81, 0xe2, 0x14, 0x5b, 0x1c, 0x53, 0xf9, 0x56, 0x01, 0x88, 0x32, 0x0e, 0xbd, 0x84, - 0x2c, 0xa1, 0xcc, 0x31, 0x89, 0xab, 0x2a, 0xcb, 0xc9, 0x95, 0xc2, 0xc6, 0xbd, 0xe9, 0xd3, 0xb5, - 0xd6, 0xa0, 0xcc, 0x39, 0xc1, 0x01, 0x41, 0x65, 0x0d, 0xd2, 0x62, 0x07, 0x95, 0x21, 0x79, 0x44, - 0x4e, 0x44, 0xd5, 0xc8, 0x63, 0xbe, 0x44, 0x8b, 0x90, 0x3e, 0xe6, 0xea, 0x88, 0x7a, 0x91, 0xc7, - 0xf2, 0xa3, 0xf2, 0x43, 0x02, 0x20, 0x8a, 0x4c, 0xa4, 0xc3, 0x7c, 0x18, 0x9b, 0xda, 0x58, 0xb7, - 0x7d, 0x8d, 0x9e, 0x4d, 0x1f, 0xdc, 0xd1, 0xf2, 0x95, 0x6e, 0x4b, 0xed, 0x8a, 0x7a, 0x6c, 0x0b, - 0x3d, 0x02, 0xd5, 0x70, 0x2c, 0xdb, 0x26, 0x86, 0x16, 0xa5, 0x81, 0xff, 0x9a, 0x5c, 0xb5, 0x34, - 0xbe, 0xea, 0x9f, 0x47, 0xa4, 0xf2, 0xdd, 0xbe, 0x84, 0x85, 0x33, 0xe4, 0xef, 0x30, 0xf4, 0x45, - 0xdc, 0xd0, 0xc2, 0xc6, 0x9d, 0x73, 0x74, 0x0f, 0xe9, 0xa4, 0xa3, 0x24, 0xee, 0x49, 0xe2, 0x91, - 0x52, 0xf9, 0x29, 0x0d, 0xf9, 0x30, 0x39, 0x50, 0x0d, 0x52, 0xa2, 0x46, 0x28, 0x17, 0xd6, 0x08, - 0x21, 0x87, 0xf6, 0x01, 0x74, 0x4a, 0x2d, 0xa6, 0x33, 0xd3, 0xa2, 0xbe, 0x1e, 0x0f, 0xa6, 0xce, - 0xc5, 0xda, 0x66, 0x88, 0x6d, 0xce, 0xe1, 0x18, 0x13, 0xfa, 0x02, 0xe6, 0xc7, 0xc4, 0x75, 0xf5, - 0xa1, 0x9f, 0xe7, 0xa2, 0x1e, 0x17, 0x36, 0x1e, 0x4e, 0x4f, 0xfd, 0x4a, 0xc2, 0xc5, 0x47, 0x73, - 0x0e, 0x17, 0xc7, 0xb1, 0xef, 0xca, 0xcf, 0x0a, 0x40, 0x74, 0x37, 0x6a, 0x43, 0xc1, 0x20, 0xee, - 0xc0, 0x31, 0x6d, 0x61, 0x86, 0x32, 0x43, 0x7d, 0x8f, 0x13, 0x9c, 0x2a, 0x9b, 0x89, 0x0f, 0x29, - 0x9b, 0x95, 0x3f, 0x14, 0x28, 0xc6, 0x6d, 0x41, 0x1f, 0x43, 0x8a, 0x9d, 0xd8, 0xd2, 0x45, 0xa5, - 0x8d, 0xa7, 0xb3, 0xbd, 0x48, 0xad, 0x77, 0x62, 0x13, 0x2c, 0x88, 0x50, 0x09, 0x12, 0x7e, 0x73, - 0x4d, 0xe1, 0x84, 0x69, 0xa0, 0xbb, 0xb0, 0xe0, 0xd1, 0x81, 0x35, 0xb6, 0x1d, 0xe2, 0xba, 0xc4, - 0xd0, 0x5c, 0xf3, 0x2d, 0x11, 0xef, 0x9f, 0xc2, 0xe5, 0xf8, 0x41, 0xd7, 0x7c, 0x4b, 0xd0, 0xbf, - 0xe1, 0xd2, 0x69, 0xd1, 0x94, 0x10, 0x2d, 0x4d, 0x0a, 0x56, 0x1f, 0x40, 0x8a, 0xdf, 0x89, 0x16, - 0xa1, 0xdc, 0xfb, 0xb4, 0xd3, 0xd0, 0xde, 0xb4, 0xbb, 0x9d, 0xc6, 0x56, 0x6b, 0xa7, 0xd5, 0xd8, - 0x2e, 0xcf, 0xa1, 0x1c, 0xa4, 0xba, 0x8d, 0x76, 0xaf, 0xac, 0xa0, 0x22, 0xe4, 0x70, 0x63, 0xab, - 0xd1, 0xda, 0x6f, 0x6c, 0x97, 0x13, 0xf5, 0xac, 0x1f, 0xe2, 0x95, 0xdf, 0x78, 0x29, 0x89, 0xea, - 0x76, 0x13, 0x20, 0x6a, 0x02, 0x7e, 0xee, 0xde, 0x99, 0xfa, 0x29, 0x70, 0x3e, 0x6c, 0x01, 0xe8, - 0x09, 0x2c, 0x85, 0x59, 0x1a, 0x46, 0xc4, 0x64, 0x9a, 0x5e, 0x0b, 0xd2, 0x34, 0x3a, 0x17, 0x79, - 0x8a, 0x5e, 0xc0, 0xf5, 0x00, 0x3b, 0x11, 0xad, 0x01, 0x3c, 0x29, 0xe0, 0x01, 0x7f, 0xfc, 0xfd, - 0xfd, 0x44, 0xff, 0x3e, 0x01, 0x29, 0xde, 0x52, 0x66, 0x1a, 0x80, 0x9e, 0xfb, 0x81, 0x90, 0x14, - 0x81, 0x70, 0x67, 0x9a, 0xd6, 0x15, 0x77, 0xfb, 0x64, 0x90, 0xa6, 0x3e, 0x24, 0x48, 0xab, 0xbb, - 0xe7, 0x3a, 0xf7, 0x0a, 0x2c, 0x6c, 0x35, 0x5b, 0x7b, 0xdb, 0xda, 0x5e, 0xab, 0xbd, 0xdb, 0xd8, - 0xd6, 0xba, 0x9d, 0xcd, 0x76, 0x59, 0x41, 0x57, 0x01, 0x75, 0x36, 0x71, 0xa3, 0xdd, 0x9b, 0xd8, - 0x4f, 0x54, 0xbe, 0x82, 0xb4, 0x68, 0xb3, 0xe8, 0x11, 0xa4, 0x78, 0xa3, 0xf5, 0xdd, 0x7b, 0x6b, - 0x1a, 
0x03, 0xb1, 0x40, 0xa0, 0x1a, 0x5c, 0x0e, 0x1c, 0x23, 0x5a, 0xf5, 0x84, 0x3b, 0x17, 0xfc, - 0x23, 0x71, 0x89, 0xf0, 0x43, 0xf5, 0x39, 0xe4, 0x82, 0x59, 0x0b, 0x2d, 0xc1, 0x15, 0xae, 0x88, - 0xb6, 0xdb, 0x6a, 0x6f, 0x9f, 0x32, 0x04, 0x20, 0xd3, 0x6d, 0xe0, 0xfd, 0x06, 0x2e, 0x2b, 0x7c, - 0xbd, 0xb5, 0xd7, 0xe2, 0x31, 0x9b, 0xa8, 0x3e, 0x84, 0x8c, 0xec, 0xef, 0x08, 0x41, 0x6a, 0x60, - 0x19, 0x32, 0x39, 0xd3, 0x58, 0xac, 0x91, 0x0a, 0x59, 0x3f, 0x3a, 0xfc, 0x8e, 0x14, 0x7c, 0x56, - 0x7f, 0x55, 0xa0, 0x34, 0x59, 0x99, 0xd1, 0x6b, 0x28, 0xba, 0xa2, 0xa2, 0x68, 0xb2, 0xb4, 0xcf, - 0x50, 0x8b, 0x9a, 0x73, 0xb8, 0x20, 0x39, 0x24, 0xe5, 0xdf, 0x21, 0x6f, 0x52, 0xa6, 0x45, 0xad, - 0x22, 0xd9, 0x9c, 0xc3, 0x39, 0x93, 0x32, 0x79, 0x7c, 0x03, 0xa0, 0x6f, 0x59, 0x23, 0xff, 0x9c, - 0x07, 0x53, 0xae, 0x39, 0x87, 0xf3, 0xfd, 0x60, 0x4c, 0x40, 0x37, 0xa1, 0x68, 0x58, 0x5e, 0x7f, - 0x44, 0x7c, 0x11, 0x1e, 0x2a, 0x0a, 0xbf, 0x44, 0xee, 0x0a, 0xa1, 0x30, 0x51, 0xab, 0xdf, 0x65, - 0x00, 0xa2, 0xc9, 0x0d, 0xf5, 0xb8, 0x3d, 0x7c, 0xea, 0x3b, 0x70, 0xf4, 0xb1, 0x68, 0xfc, 0xdc, - 0x9e, 0xf5, 0xa9, 0xc6, 0x3e, 0xb9, 0xdc, 0x11, 0x40, 0x2c, 0x87, 0x47, 0xf9, 0x81, 0x56, 0xe1, - 0x72, 0x6c, 0x96, 0xd4, 0x0e, 0x75, 0xf7, 0x50, 0x0b, 0x6b, 0x58, 0x39, 0x1a, 0x16, 0x9b, 0xba, - 0x7b, 0xd8, 0x32, 0x2a, 0xbf, 0x27, 0x7d, 0x9d, 0x04, 0x1c, 0xbd, 0x86, 0xf9, 0x03, 0x8f, 0x0e, - 0x78, 0x22, 0x6b, 0x62, 0xa0, 0x9f, 0xa5, 0xe0, 0x17, 0x03, 0x8a, 0x36, 0xa7, 0xec, 0xc3, 0x55, - 0xcb, 0x31, 0x87, 0x26, 0xd5, 0x47, 0xda, 0x24, 0x77, 0x62, 0x06, 0xee, 0xc5, 0x80, 0x6b, 0x27, - 0x7e, 0x47, 0x0b, 0xf2, 0x07, 0xe6, 0x88, 0x48, 0xda, 0xe4, 0x0c, 0xb4, 0x39, 0x0e, 0x17, 0x54, - 0x37, 0xa0, 0x30, 0x32, 0x29, 0xd1, 0xa8, 0x37, 0xee, 0x13, 0x47, 0x78, 0x34, 0x89, 0x81, 0x6f, - 0xb5, 0xc5, 0x0e, 0xba, 0x09, 0xf3, 0x03, 0x6b, 0xe4, 0x8d, 0x69, 0x20, 0x92, 0x16, 0x22, 0x45, - 0xb9, 0xe9, 0x0b, 0xd5, 0xa1, 0x30, 0xb2, 0x74, 0x43, 0x1b, 0x5b, 0x86, 0x37, 0x0a, 0xfe, 0xaf, - 0x38, 0x6f, 0x08, 0x7e, 0x25, 0x04, 0x31, 0x70, 0x94, 0x5c, 0xa3, 0x2e, 0x94, 0xe4, 0x38, 0xab, - 0x1d, 0x13, 0xc7, 0xe5, 0xdd, 0x37, 0x3b, 0x83, 0x65, 0xf3, 0x92, 0x63, 0x5f, 0x52, 0x54, 0xbe, - 0x51, 0xa0, 0x10, 0x8b, 0x1d, 0xb4, 0x03, 0x69, 0x11, 0x7e, 0xd3, 0x8c, 0x9d, 0xef, 0x8a, 0x3e, - 0x2c, 0xe1, 0xe8, 0x1e, 0x2c, 0x06, 0x65, 0x45, 0x86, 0xf3, 0x44, 0x5d, 0x41, 0xfe, 0x99, 0xbc, - 0x54, 0x16, 0x96, 0x1f, 0x15, 0xc8, 0xf8, 0x96, 0x6e, 0x43, 0xc6, 0x7f, 0xa8, 0x59, 0xc2, 0xcd, - 0xc7, 0xa2, 0x8f, 0x20, 0xd7, 0xf7, 0xf8, 0x68, 0xee, 0x87, 0xfb, 0x5f, 0xe5, 0xc9, 0x0a, 0x74, - 0xcb, 0xa8, 0x7e, 0x0e, 0x0b, 0x67, 0x4e, 0xa3, 0xd1, 0x59, 0x89, 0x8d, 0xce, 0xdc, 0x6c, 0x26, - 0x45, 0x89, 0xa1, 0xf5, 0x4f, 0x18, 0x99, 0x34, 0x3b, 0x3c, 0xab, 0x9f, 0x30, 0x22, 0xcc, 0xae, - 0xdb, 0x70, 0xdd, 0xb4, 0xde, 0xaf, 0x57, 0x5d, 0xfe, 0x57, 0xd0, 0xe1, 0x9b, 0x1d, 0xe5, 0xb3, - 0xfa, 0xd0, 0x64, 0x87, 0x5e, 0xbf, 0x36, 0xb0, 0xc6, 0x6b, 0x52, 0x7e, 0xd5, 0xa4, 0x2e, 0x73, - 0xbc, 0x31, 0xa1, 0xb2, 0xdf, 0xae, 0x45, 0x54, 0xab, 0xf2, 0x67, 0x89, 0x21, 0xa1, 0xab, 0xc3, - 0xe8, 0xf7, 0x8d, 0x7e, 0x46, 0x6c, 0xdf, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0x1e, 0xe0, 0x94, - 0x45, 0x03, 0x11, 0x00, 0x00, + // 1524 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x5b, 0x6f, 0x1b, 0xb7, + 0x12, 0xf6, 0xea, 0xae, 0x91, 0xac, 0xc8, 0x8c, 0x93, 0xc8, 0x3a, 0x39, 0x27, 0x3e, 0x4a, 0x82, + 0xe3, 0x9c, 0xd6, 0x72, 0xe2, 0xa4, 0x41, 0xae, 0x48, 0x7d, 0x91, 0x2b, 0xc5, 0x8e, 0xaa, 0x50, + 0x8a, 0xd1, 0x0b, 0x8a, 0xc5, 0x4a, 0x4b, 0xcb, 
0x5b, 0x4b, 0xdc, 0xed, 0x92, 0xeb, 0xc2, 0x79, + 0xeb, 0x53, 0x51, 0xf4, 0xad, 0x40, 0xd1, 0x3f, 0xd0, 0x87, 0xfe, 0x9d, 0xa2, 0xbf, 0xa3, 0xbf, + 0xa0, 0x2f, 0x05, 0xc9, 0xbd, 0xc9, 0x49, 0x6c, 0x55, 0x79, 0x31, 0xb8, 0xe4, 0x7c, 0x1f, 0x39, + 0x9c, 0x6f, 0x86, 0x63, 0xc1, 0x4d, 0xdb, 0x21, 0x74, 0x40, 0x28, 0xf3, 0xd8, 0x9a, 0xe3, 0xda, + 0xdc, 0x5e, 0xe3, 0xae, 0x31, 0x20, 0x6b, 0xc7, 0x77, 0xd4, 0xa0, 0x2e, 0x27, 0xd1, 0x52, 0x64, + 0xa6, 0x66, 0xea, 0x6a, 0xf5, 0xf8, 0x4e, 0xf5, 0xda, 0xd0, 0xb6, 0x87, 0x23, 0xa2, 0xd0, 0x7d, + 0xef, 0x60, 0x8d, 0x5b, 0x63, 0xc2, 0xb8, 0x31, 0x76, 0x94, 0x65, 0xf5, 0x3f, 0xa7, 0x0d, 0xbe, + 0x75, 0x0d, 0xc7, 0x21, 0xae, 0xcf, 0x54, 0xfb, 0xee, 0x12, 0xa4, 0xba, 0x8e, 0x41, 0xd1, 0x12, + 0xe4, 0x24, 0xab, 0x6e, 0x99, 0x15, 0x6d, 0x59, 0x5b, 0x29, 0xe2, 0xac, 0xfc, 0x6e, 0x99, 0xe8, + 0x0a, 0x64, 0x99, 0x63, 0x50, 0xb1, 0x92, 0x90, 0x2b, 0x19, 0xf1, 0xd9, 0x32, 0xd1, 0x73, 0x00, + 0x69, 0xc3, 0xb8, 0xc1, 0x49, 0xe5, 0xc2, 0xb2, 0xb6, 0x52, 0x58, 0xff, 0x7f, 0xfd, 0x9d, 0xa7, + 0xad, 0x8b, 0x8d, 0xea, 0xbd, 0x10, 0x81, 0x63, 0x68, 0x74, 0x03, 0x4a, 0x8e, 0xe1, 0x12, 0xca, + 0xf5, 0x60, 0xaf, 0xa4, 0xdc, 0xab, 0xa8, 0x66, 0xbb, 0x6a, 0xc7, 0x8f, 0x21, 0x45, 0x8d, 0x31, + 0xa9, 0xa4, 0xe4, 0x5e, 0x1f, 0x9e, 0xb1, 0x57, 0xcf, 0xf5, 0xe8, 0xc0, 0xe0, 0x46, 0x7f, 0x44, + 0xba, 0xdc, 0xb5, 0xe8, 0x10, 0x4b, 0x24, 0x7a, 0x02, 0xa9, 0x23, 0x8b, 0x9a, 0x95, 0xd2, 0xb2, + 0xb6, 0x52, 0x5a, 0x5f, 0x39, 0xef, 0xb4, 0xe2, 0xcf, 0xae, 0x45, 0x4d, 0x2c, 0x51, 0xe8, 0x21, + 0x00, 0xe3, 0x86, 0xcb, 0x75, 0x71, 0xcf, 0x95, 0xb4, 0x3c, 0x45, 0xb5, 0xae, 0xee, 0xb8, 0x1e, + 0xdc, 0x71, 0xbd, 0x17, 0x04, 0x01, 0xe7, 0xa5, 0xb5, 0xf8, 0x46, 0x1f, 0x41, 0x8e, 0x50, 0x53, + 0x01, 0x33, 0xe7, 0x02, 0xb3, 0x84, 0x9a, 0x12, 0xf6, 0x1c, 0xc0, 0xe0, 0xdc, 0xb5, 0xfa, 0x1e, + 0x27, 0xac, 0x92, 0x9d, 0xee, 0x8e, 0x37, 0x42, 0x04, 0x8e, 0xa1, 0xd1, 0x0e, 0x14, 0x18, 0x37, + 0x06, 0x47, 0xba, 0xb4, 0xae, 0xe4, 0x24, 0xd9, 0xcd, 0xb3, 0xc8, 0x84, 0xb5, 0x0c, 0x18, 0x06, + 0x16, 0x8e, 0xd1, 0x2e, 0x14, 0x84, 0x1b, 0x3a, 0x39, 0x26, 0x94, 0xb3, 0x4a, 0x7e, 0xca, 0xc0, + 0x5b, 0x63, 0xd2, 0x90, 0x08, 0x0c, 0x3c, 0x1c, 0xa3, 0xc7, 0x90, 0x1e, 0x59, 0xf4, 0x88, 0x55, + 0xe0, 0xfc, 0xe3, 0x08, 0x9a, 0x3d, 0x61, 0x8c, 0x15, 0x06, 0x3d, 0x84, 0x8c, 0x90, 0x8f, 0xc7, + 0x2a, 0x05, 0x89, 0xfe, 0xef, 0xd9, 0xce, 0x70, 0x8f, 0x61, 0x1f, 0x80, 0x3e, 0x83, 0x7f, 0x31, + 0x63, 0x4c, 0x74, 0xc7, 0xb5, 0x07, 0x84, 0x31, 0xdd, 0x60, 0x7a, 0x4c, 0x80, 0x95, 0xe2, 0x3b, + 0x42, 0xb4, 0x69, 0xdb, 0xa3, 0x7d, 0x63, 0xe4, 0x11, 0x7c, 0x45, 0xc0, 0x3b, 0x0a, 0xbd, 0xc1, + 0x3a, 0xa1, 0x4c, 0xd1, 0x0e, 0x94, 0x07, 0x87, 0xd6, 0xc8, 0x54, 0x4a, 0x1e, 0xd8, 0x1e, 0xe5, + 0x95, 0x79, 0x49, 0x77, 0xf5, 0x0d, 0xba, 0x57, 0x2d, 0xca, 0xef, 0xae, 0x2b, 0xc2, 0x92, 0x44, + 0x09, 0x8a, 0x2d, 0x81, 0xa9, 0xfe, 0xa0, 0x01, 0x44, 0xd9, 0x82, 0x9e, 0x43, 0x96, 0x50, 0xee, + 0x5a, 0x84, 0x55, 0xb4, 0xe5, 0xe4, 0x4a, 0x61, 0xfd, 0xf6, 0xf4, 0xa9, 0x56, 0x6f, 0x50, 0xee, + 0x9e, 0xe0, 0x80, 0xa0, 0xba, 0x06, 0x69, 0x39, 0x83, 0xca, 0x90, 0x3c, 0x22, 0x27, 0x32, 0xe3, + 0xf3, 0x58, 0x0c, 0xd1, 0x22, 0xa4, 0x8f, 0xc5, 0x71, 0x64, 0xae, 0xe7, 0xb1, 0xfa, 0xa8, 0xfe, + 0x9c, 0x00, 0x88, 0x54, 0x85, 0x0c, 0x98, 0x0f, 0x75, 0xa5, 0x8f, 0x0d, 0xc7, 0x3f, 0xd1, 0x93, + 0xe9, 0x85, 0x19, 0x0d, 0x5f, 0x18, 0x8e, 0x3a, 0x5d, 0xd1, 0x88, 0x4d, 0xa1, 0x07, 0x50, 0x31, + 0x5d, 0xdb, 0x71, 0x88, 0xa9, 0x47, 0x12, 0xf6, 0x6f, 0x53, 0x1c, 0x2d, 0x8d, 0x2f, 0xfb, 0xeb, + 0x11, 0xa9, 0xba, 0xb7, 0xaf, 0x61, 0xe1, 0x0d, 0xf2, 0xb7, 0x38, 0xfa, 
0x2c, 0xee, 0x68, 0x61, + 0xfd, 0xd6, 0x19, 0x67, 0x0f, 0xe9, 0x54, 0xa0, 0x14, 0xee, 0x51, 0xe2, 0x81, 0x56, 0xfd, 0x35, + 0x0d, 0xf9, 0x50, 0xd8, 0xa8, 0x0e, 0x29, 0x99, 0xdf, 0xda, 0xb9, 0xf9, 0x2d, 0xed, 0xd0, 0x3e, + 0x80, 0x41, 0xa9, 0xcd, 0x0d, 0x6e, 0xd9, 0xd4, 0x3f, 0xc7, 0xbd, 0xa9, 0xf3, 0xa8, 0xbe, 0x11, + 0x62, 0x9b, 0x73, 0x38, 0xc6, 0x84, 0xbe, 0x82, 0xf9, 0x31, 0x61, 0xcc, 0x18, 0xfa, 0x39, 0x2a, + 0x6b, 0x69, 0x61, 0xfd, 0xfe, 0xf4, 0xd4, 0x2f, 0x14, 0x5c, 0x7e, 0x34, 0xe7, 0x70, 0x71, 0x1c, + 0xfb, 0xae, 0xfe, 0xa6, 0x01, 0x44, 0x7b, 0xa3, 0x36, 0x14, 0x4c, 0xc2, 0x06, 0xae, 0xe5, 0x48, + 0x37, 0xb4, 0x19, 0x6a, 0x73, 0x9c, 0xe0, 0x54, 0xc9, 0x4b, 0xbc, 0x4f, 0xc9, 0xab, 0xfe, 0xa5, + 0x41, 0x31, 0xee, 0x0b, 0xfa, 0x14, 0x52, 0xfc, 0xc4, 0x51, 0x21, 0x2a, 0xad, 0x3f, 0x9e, 0xed, + 0x46, 0xea, 0xbd, 0x13, 0x87, 0x60, 0x49, 0x84, 0x4a, 0x90, 0xf0, 0x1f, 0xc6, 0x14, 0x4e, 0x58, + 0x26, 0xfa, 0x00, 0x16, 0x3c, 0x3a, 0xb0, 0xc7, 0x8e, 0x4b, 0x18, 0x23, 0xa6, 0xce, 0xac, 0xd7, + 0x44, 0xde, 0x7f, 0x0a, 0x97, 0xe3, 0x0b, 0x5d, 0xeb, 0x35, 0x41, 0xff, 0x83, 0x0b, 0xa7, 0x4d, + 0x53, 0xd2, 0xb4, 0x34, 0x69, 0x58, 0xbb, 0x07, 0x29, 0xb1, 0x27, 0x5a, 0x84, 0x72, 0xef, 0xf3, + 0x4e, 0x43, 0x7f, 0xd5, 0xee, 0x76, 0x1a, 0x5b, 0xad, 0x9d, 0x56, 0x63, 0xbb, 0x3c, 0x87, 0x72, + 0x90, 0xea, 0x36, 0xda, 0xbd, 0xb2, 0x86, 0x8a, 0x90, 0xc3, 0x8d, 0xad, 0x46, 0x6b, 0xbf, 0xb1, + 0x5d, 0x4e, 0x6c, 0x66, 0x7d, 0x89, 0x57, 0xff, 0x10, 0xa5, 0x24, 0xaa, 0xb9, 0x4d, 0x80, 0xa8, + 0x80, 0xfb, 0xb9, 0x7b, 0x6b, 0xea, 0xab, 0xc0, 0xf9, 0xb0, 0x7c, 0xa3, 0x47, 0xb0, 0x14, 0x66, + 0x69, 0xa8, 0x88, 0xc9, 0x34, 0xbd, 0x12, 0xa4, 0x69, 0xb4, 0x2e, 0xf3, 0x14, 0x3d, 0x83, 0xab, + 0x01, 0x76, 0x42, 0xad, 0x01, 0x3c, 0x29, 0xe1, 0x01, 0x7f, 0xfc, 0xfe, 0xfd, 0x44, 0xff, 0x29, + 0x01, 0x29, 0xf1, 0x1c, 0xcc, 0xd4, 0xbc, 0x3c, 0xf5, 0x85, 0x90, 0x94, 0x42, 0xb8, 0x35, 0xcd, + 0xb3, 0x13, 0x0f, 0xfb, 0xa4, 0x48, 0x53, 0xef, 0x23, 0xd2, 0xda, 0xee, 0x99, 0xc1, 0xbd, 0x04, + 0x0b, 0x5b, 0xcd, 0xd6, 0xde, 0xb6, 0xbe, 0xd7, 0x6a, 0xef, 0x36, 0xb6, 0xf5, 0x6e, 0x67, 0xa3, + 0x5d, 0xd6, 0xd0, 0x65, 0x40, 0x9d, 0x0d, 0xdc, 0x68, 0xf7, 0x26, 0xe6, 0x13, 0xd5, 0x6f, 0x20, + 0x2d, 0x9f, 0x48, 0xf4, 0x00, 0x52, 0xe2, 0x91, 0xf4, 0xc3, 0x7b, 0x63, 0x1a, 0x07, 0xb1, 0x44, + 0xa0, 0x3a, 0x5c, 0x0c, 0x02, 0x23, 0x9f, 0xd9, 0x89, 0x70, 0x2e, 0xf8, 0x4b, 0x72, 0x13, 0x19, + 0x87, 0xda, 0x53, 0xc8, 0x05, 0x7d, 0x12, 0x5a, 0x82, 0x4b, 0xe2, 0x20, 0xfa, 0x6e, 0xab, 0xbd, + 0x7d, 0xca, 0x11, 0x80, 0x4c, 0xb7, 0x81, 0xf7, 0x1b, 0xb8, 0xac, 0x89, 0xf1, 0xd6, 0x5e, 0x4b, + 0x68, 0x36, 0x51, 0xbb, 0x0f, 0x19, 0xf5, 0x36, 0x23, 0x04, 0xa9, 0x81, 0x6d, 0xaa, 0xe4, 0x4c, + 0x63, 0x39, 0x46, 0x15, 0xc8, 0xfa, 0xea, 0xf0, 0x5f, 0xa4, 0xe0, 0xb3, 0xf6, 0xbb, 0x06, 0xa5, + 0xc9, 0xca, 0x8c, 0x5e, 0x42, 0x91, 0xc9, 0x8a, 0xa2, 0xab, 0xd2, 0x3e, 0x43, 0x2d, 0x6a, 0xce, + 0xe1, 0x82, 0xe2, 0x50, 0x94, 0xff, 0x86, 0xbc, 0x45, 0xb9, 0x1e, 0x3d, 0x15, 0xc9, 0xe6, 0x1c, + 0xce, 0x59, 0x94, 0xab, 0xe5, 0x6b, 0x00, 0x7d, 0xdb, 0x1e, 0xf9, 0xeb, 0x42, 0x4c, 0xb9, 0xe6, + 0x1c, 0xce, 0xf7, 0x83, 0x36, 0x01, 0x5d, 0x87, 0xa2, 0x69, 0x7b, 0xfd, 0x11, 0xf1, 0x4d, 0x84, + 0x54, 0x34, 0xb1, 0x89, 0x9a, 0x95, 0x46, 0x61, 0xa2, 0xd6, 0x7e, 0xcc, 0x00, 0x44, 0x5d, 0x17, + 0xea, 0x09, 0x7f, 0x44, 0xc7, 0x76, 0xe0, 0x1a, 0x63, 0xf9, 0xf0, 0x0b, 0x7f, 0xee, 0x4c, 0xd5, + 0xb2, 0xa9, 0xe1, 0x8e, 0x04, 0x62, 0xd5, 0xf8, 0xa9, 0x0f, 0xb4, 0x0a, 0x17, 0x63, 0x7d, 0xa0, + 0x7e, 0x68, 0xb0, 0x43, 0x3d, 0xac, 0x61, 0xe5, 0xa8, 0xd1, 0x6b, 0x1a, 0xec, 0xb0, 0x65, 0x56, + 
0xff, 0x4c, 0xfa, 0x67, 0x92, 0x70, 0xf4, 0x12, 0xe6, 0x0f, 0x3c, 0x3a, 0x10, 0x89, 0xac, 0xcb, + 0x66, 0x7c, 0x96, 0x82, 0x5f, 0x0c, 0x28, 0xda, 0x82, 0xb2, 0x0f, 0x97, 0x6d, 0xd7, 0x1a, 0x5a, + 0xd4, 0x18, 0xe9, 0x93, 0xdc, 0x89, 0x19, 0xb8, 0x17, 0x03, 0xae, 0x9d, 0xf8, 0x1e, 0x2d, 0xc8, + 0x1f, 0x58, 0x23, 0xa2, 0x68, 0x93, 0x33, 0xd0, 0xe6, 0x04, 0x5c, 0x52, 0x5d, 0x83, 0xc2, 0xc8, + 0xa2, 0x44, 0xa7, 0xde, 0xb8, 0x4f, 0x5c, 0x19, 0xd1, 0x24, 0x06, 0x31, 0xd5, 0x96, 0x33, 0xe8, + 0x3a, 0xcc, 0x0f, 0xec, 0x91, 0x37, 0xa6, 0x81, 0x49, 0x5a, 0x9a, 0x14, 0xd5, 0xa4, 0x6f, 0xb4, + 0x09, 0x85, 0x91, 0x6d, 0x98, 0xfa, 0xd8, 0x36, 0xbd, 0x51, 0xf0, 0x3f, 0xc1, 0x59, 0x0d, 0xec, + 0x0b, 0x69, 0x88, 0x41, 0xa0, 0xd4, 0x18, 0x75, 0xa1, 0xc4, 0x6c, 0xcf, 0x1d, 0x10, 0xfd, 0x98, + 0xb8, 0x4c, 0xbc, 0xbe, 0xd9, 0x19, 0x3c, 0x9b, 0x57, 0x1c, 0xfb, 0x8a, 0xa2, 0xfa, 0xbd, 0x06, + 0x85, 0x98, 0x76, 0xd0, 0x0e, 0xa4, 0xa5, 0xfc, 0xa6, 0x69, 0x3b, 0xdf, 0xa6, 0x3e, 0xac, 0xe0, + 0xe8, 0x36, 0x2c, 0x06, 0x65, 0x45, 0xc9, 0x79, 0xa2, 0xae, 0x20, 0x7f, 0x4d, 0x6d, 0xaa, 0x0a, + 0xcb, 0x2f, 0x1a, 0x64, 0x7c, 0x4f, 0xb7, 0x21, 0xe3, 0x5f, 0xd4, 0x2c, 0x72, 0xf3, 0xb1, 0xe8, + 0x13, 0xc8, 0xf5, 0x3d, 0xd1, 0x9a, 0xfb, 0x72, 0xff, 0xa7, 0x3c, 0x59, 0x89, 0x6e, 0x99, 0xb5, + 0x2f, 0x61, 0xe1, 0x8d, 0xd5, 0xa8, 0x75, 0xd6, 0x62, 0xad, 0xb3, 0x70, 0x9b, 0x2b, 0x53, 0x62, + 0xea, 0xfd, 0x13, 0x4e, 0x26, 0xdd, 0x0e, 0xd7, 0x36, 0x4f, 0x38, 0x91, 0x6e, 0x6f, 0x3a, 0x70, + 0xd5, 0xb2, 0xdf, 0x7d, 0xae, 0x4d, 0xf5, 0x5f, 0x41, 0x47, 0x4c, 0x76, 0xb4, 0x2f, 0x36, 0x87, + 0x16, 0x3f, 0xf4, 0xfa, 0xf5, 0x81, 0x3d, 0x5e, 0x53, 0xf6, 0xab, 0x16, 0x65, 0xdc, 0xf5, 0xc6, + 0x84, 0xaa, 0xf7, 0x76, 0x2d, 0xa2, 0x5a, 0x55, 0xbf, 0x33, 0x0c, 0x09, 0x5d, 0x1d, 0x46, 0x3f, + 0x37, 0xf4, 0x33, 0x72, 0xfa, 0xee, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x5e, 0xa6, 0x6a, + 0x92, 0x10, 0x00, 0x00, } diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go index 2ac2d28c47..929e8995bb 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go @@ -18,42 +18,9 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// How spans should be sampled: -// - Always off -// - Always on -// - Always follow the parent Span's decision (off if no parent). 
-type ConstantSampler_ConstantDecision int32 - -const ( - ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 - ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 - ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 -) - -var ConstantSampler_ConstantDecision_name = map[int32]string{ - 0: "ALWAYS_OFF", - 1: "ALWAYS_ON", - 2: "ALWAYS_PARENT", -} +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -var ConstantSampler_ConstantDecision_value = map[string]int32{ - "ALWAYS_OFF": 0, - "ALWAYS_ON": 1, - "ALWAYS_PARENT": 2, -} - -func (x ConstantSampler_ConstantDecision) String() string { - return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) -} - -func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{2, 0} -} - -// Global configuration of the trace service. All fields must be specified, or -// the default (zero) values will be used for each type. +// Global configuration of the trace service. type TraceConfig struct { // The global default sampler used to make decisions on span sampling. // @@ -178,15 +145,99 @@ func (m *TraceConfig) GetMaxNumberOfLinks() int64 { return 0 } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*TraceConfig) XXX_OneofWrappers() []interface{} { - return []interface{}{ +// XXX_OneofFuncs is for the internal use of the proto package. +func (*TraceConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TraceConfig_OneofMarshaler, _TraceConfig_OneofUnmarshaler, _TraceConfig_OneofSizer, []interface{}{ (*TraceConfig_ProbabilitySampler)(nil), (*TraceConfig_ConstantSampler)(nil), (*TraceConfig_RateLimitingSampler)(nil), } } +func _TraceConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TraceConfig) + // sampler + switch x := m.Sampler.(type) { + case *TraceConfig_ProbabilitySampler: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ProbabilitySampler); err != nil { + return err + } + case *TraceConfig_ConstantSampler: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ConstantSampler); err != nil { + return err + } + case *TraceConfig_RateLimitingSampler: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RateLimitingSampler); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TraceConfig.Sampler has unexpected type %T", x) + } + return nil +} + +func _TraceConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TraceConfig) + switch tag { + case 1: // sampler.probability_sampler + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ProbabilitySampler) + err := b.DecodeMessage(msg) + m.Sampler = &TraceConfig_ProbabilitySampler{msg} + return true, err + case 2: // sampler.constant_sampler + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ConstantSampler) + err := b.DecodeMessage(msg) + m.Sampler = &TraceConfig_ConstantSampler{msg} + return true, err + case 3: // sampler.rate_limiting_sampler + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RateLimitingSampler) + err := b.DecodeMessage(msg) + m.Sampler = &TraceConfig_RateLimitingSampler{msg} + return true, err + default: + return 
false, nil + } +} + +func _TraceConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TraceConfig) + // sampler + switch x := m.Sampler.(type) { + case *TraceConfig_ProbabilitySampler: + s := proto.Size(x.ProbabilitySampler) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *TraceConfig_ConstantSampler: + s := proto.Size(x.ConstantSampler) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *TraceConfig_RateLimitingSampler: + s := proto.Size(x.RateLimitingSampler) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + // Sampler that tries to uniformly sample traces with a given probability. // The probability of sampling a trace is equal to that of the specified probability. type ProbabilitySampler struct { @@ -229,12 +280,14 @@ func (m *ProbabilitySampler) GetSamplingProbability() float64 { return 0 } -// Sampler that always makes a constant decision on span sampling. +// Sampler that makes a constant decision (either always "yes" or always "no") +// on span sampling. type ConstantSampler struct { - Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // Whether spans should be always sampled, or never sampled. + Decision bool `protobuf:"varint,1,opt,name=decision,proto3" json:"decision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } @@ -262,11 +315,11 @@ func (m *ConstantSampler) XXX_DiscardUnknown() { var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo -func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { +func (m *ConstantSampler) GetDecision() bool { if m != nil { return m.Decision } - return ConstantSampler_ALWAYS_OFF + return false } // Sampler that tries to sample with a rate per time window. 
@@ -311,7 +364,6 @@ func (m *RateLimitingSampler) GetQps() int64 { } func init() { - proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig") proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler") proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler") @@ -323,36 +375,32 @@ func init() { } var fileDescriptor_5359209b41ff50c5 = []byte{ - // 486 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x4e, 0xdb, 0x40, - 0x10, 0x86, 0x31, 0xa1, 0x50, 0x06, 0x01, 0xee, 0x5a, 0x54, 0x46, 0xe2, 0x80, 0x7c, 0x29, 0xaa, - 0x6a, 0xbb, 0xd0, 0x43, 0x55, 0x55, 0xaa, 0x94, 0x00, 0x51, 0x0f, 0x69, 0x88, 0x0c, 0x52, 0xd4, - 0x5e, 0xdc, 0xb5, 0xd9, 0xb8, 0xab, 0xc6, 0xb3, 0xae, 0x77, 0x1d, 0xd1, 0x77, 0xe9, 0x43, 0xf4, - 0x11, 0xab, 0xac, 0x5d, 0xdb, 0x49, 0x00, 0x71, 0xdb, 0xf9, 0xff, 0xf9, 0x7e, 0xaf, 0xbc, 0x33, - 0xf0, 0x46, 0x64, 0x0c, 0x63, 0x86, 0xb2, 0x90, 0x7e, 0x96, 0x0b, 0x25, 0x7c, 0x95, 0xd3, 0x98, - 0xf9, 0xb3, 0xd3, 0xf2, 0x10, 0xc6, 0x02, 0x27, 0x3c, 0xf1, 0xb4, 0x47, 0x0e, 0x9b, 0xee, 0x52, - 0xf1, 0x74, 0x93, 0x37, 0x3b, 0x75, 0xfe, 0x6c, 0xc0, 0xce, 0xcd, 0xbc, 0x38, 0xd7, 0x00, 0xf9, - 0x0e, 0x56, 0x96, 0x8b, 0x88, 0x46, 0x7c, 0xca, 0xd5, 0xef, 0x50, 0xd2, 0x34, 0x9b, 0xb2, 0xdc, - 0x36, 0x8e, 0x8d, 0x93, 0x9d, 0x33, 0xd7, 0x7b, 0x30, 0xc8, 0x1b, 0x35, 0xd4, 0x75, 0x09, 0x7d, - 0x5e, 0x0b, 0x48, 0xb6, 0xa2, 0x92, 0x31, 0x98, 0xb1, 0x40, 0xa9, 0x28, 0xaa, 0x3a, 0x7e, 0x5d, - 0xc7, 0xbf, 0x7e, 0x24, 0xfe, 0xbc, 0x42, 0x9a, 0xec, 0xfd, 0x78, 0x51, 0x22, 0xb7, 0x70, 0x90, - 0x53, 0xc5, 0xc2, 0x29, 0x4f, 0xb9, 0xe2, 0x98, 0xd4, 0xe9, 0x1d, 0x9d, 0xee, 0x3d, 0x92, 0x1e, - 0x50, 0xc5, 0x06, 0x15, 0xd6, 0x7c, 0xc1, 0xca, 0x57, 0x65, 0xf2, 0x1e, 0xec, 0x94, 0xde, 0x85, - 0x58, 0xa4, 0x11, 0xcb, 0x43, 0x31, 0x09, 0xa9, 0x52, 0x39, 0x8f, 0x0a, 0xc5, 0xa4, 0xbd, 0x71, - 0x6c, 0x9c, 0x74, 0x82, 0x83, 0x94, 0xde, 0x0d, 0xb5, 0x7d, 0x35, 0xe9, 0xd6, 0x26, 0xf9, 0x00, - 0x87, 0x4b, 0x20, 0xa2, 0x50, 0x54, 0x71, 0x81, 0xd2, 0x7e, 0xa6, 0xc9, 0x97, 0x6d, 0xb2, 0x71, - 0xc9, 0x27, 0x38, 0x5a, 0x44, 0x53, 0x26, 0x25, 0x4d, 0x58, 0xc8, 0x66, 0x0c, 0x95, 0xb4, 0x37, - 0x35, 0x6d, 0xb7, 0xe8, 0x2f, 0x65, 0xc3, 0xa5, 0xf6, 0x89, 0x0b, 0xd6, 0x22, 0x3f, 0xe5, 0xf8, - 0x53, 0xda, 0x5b, 0x1a, 0x33, 0x5b, 0xd8, 0x60, 0xae, 0xf7, 0xb6, 0x61, 0xab, 0xfa, 0x75, 0x4e, - 0x1f, 0xc8, 0xea, 0xc3, 0x92, 0xb7, 0x60, 0xe9, 0x06, 0x8e, 0x49, 0xcb, 0xd5, 0x43, 0x62, 0x04, - 0xf7, 0x59, 0xce, 0x5f, 0x03, 0xf6, 0x97, 0x9e, 0x90, 0x8c, 0xe1, 0xf9, 0x2d, 0x8b, 0xb9, 0xe4, - 0x02, 0x35, 0xba, 0x77, 0xf6, 0xf1, 0xe9, 0x03, 0x50, 0xd7, 0x17, 0x55, 0x44, 0x50, 0x87, 0x39, - 0x17, 0x60, 0x2e, 0xbb, 0x64, 0x0f, 0xa0, 0x3b, 0x18, 0x77, 0xbf, 0x5e, 0x87, 0x57, 0xfd, 0xbe, - 0xb9, 0x46, 0x76, 0x61, 0xfb, 0x7f, 0x3d, 0x34, 0x0d, 0xf2, 0x02, 0x76, 0xab, 0x72, 0xd4, 0x0d, - 0x2e, 0x87, 0x37, 0xe6, 0xba, 0xf3, 0x0a, 0xac, 0x7b, 0xc6, 0x82, 0x98, 0xd0, 0xf9, 0x95, 0x49, - 0x7d, 0xe1, 0x4e, 0x30, 0x3f, 0xf6, 0x66, 0x70, 0xc4, 0xc5, 0xc3, 0x37, 0xef, 0x99, 0xad, 0xfd, - 0x1a, 0xcd, 0xad, 0x91, 0xf1, 0xad, 0x97, 0x70, 0xf5, 0xa3, 0x88, 0xbc, 0x58, 0xa4, 0x7e, 0x49, - 0xb9, 0x1c, 0xa5, 0xca, 0x8b, 0x94, 0x61, 0xf9, 0xea, 0x7e, 0x13, 0xe8, 0x96, 0x1b, 0x9e, 0x30, - 0x74, 0x93, 0x66, 0xd1, 0xa3, 0x4d, 0x2d, 0xbf, 0xfb, 0x17, 0x00, 
0x00, 0xff, 0xff, 0x13, 0xe2, - 0xd9, 0x56, 0x0c, 0x04, 0x00, 0x00, + // 431 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x41, 0x6f, 0xd4, 0x30, + 0x10, 0x85, 0x09, 0x5b, 0xda, 0x32, 0x3d, 0x74, 0xe5, 0xa8, 0x28, 0x45, 0x3d, 0x54, 0x7b, 0xa1, + 0x42, 0x24, 0xa1, 0x70, 0x40, 0x5c, 0x90, 0xd8, 0x0a, 0xc4, 0xa1, 0xc0, 0x2a, 0x20, 0x21, 0x71, + 0x09, 0x4e, 0xea, 0x0d, 0x16, 0xf1, 0x38, 0xd8, 0x93, 0xa8, 0xfc, 0x17, 0x7e, 0x6c, 0x15, 0x67, + 0x95, 0x64, 0xbb, 0xed, 0xde, 0xec, 0xf7, 0xde, 0xf7, 0x2c, 0x79, 0x6c, 0x78, 0xa1, 0x2b, 0x81, + 0xb9, 0x40, 0x5b, 0xdb, 0xb8, 0x32, 0x9a, 0x74, 0x4c, 0x86, 0xe7, 0x22, 0x6e, 0xce, 0xbb, 0x45, + 0x9a, 0x6b, 0x5c, 0xca, 0x22, 0x72, 0x1e, 0x3b, 0x1e, 0xd2, 0x9d, 0x12, 0xb9, 0x50, 0xd4, 0x9c, + 0xcf, 0xfe, 0xef, 0xc0, 0xc1, 0xf7, 0x76, 0x73, 0xe1, 0x00, 0xf6, 0x0b, 0xfc, 0xca, 0xe8, 0x8c, + 0x67, 0xb2, 0x94, 0xf4, 0x2f, 0xb5, 0x5c, 0x55, 0xa5, 0x30, 0x81, 0x77, 0xea, 0x9d, 0x1d, 0xbc, + 0x0a, 0xa3, 0x7b, 0x8b, 0xa2, 0xc5, 0x40, 0x7d, 0xeb, 0xa0, 0x4f, 0x0f, 0x12, 0x56, 0x6d, 0xa8, + 0xec, 0x07, 0x4c, 0x73, 0x8d, 0x96, 0x38, 0x52, 0x5f, 0xff, 0xd0, 0xd5, 0x3f, 0xdf, 0x52, 0x7f, + 0xb1, 0x42, 0x86, 0xee, 0xc3, 0x7c, 0x5d, 0x62, 0x57, 0x70, 0x64, 0x38, 0x89, 0xb4, 0x94, 0x4a, + 0x92, 0xc4, 0xa2, 0x6f, 0x9f, 0xb8, 0xf6, 0x68, 0x4b, 0x7b, 0xc2, 0x49, 0x5c, 0xae, 0xb0, 0xe1, + 0x04, 0xdf, 0x6c, 0xca, 0xec, 0x0d, 0x04, 0x8a, 0x5f, 0xa7, 0x58, 0xab, 0x4c, 0x98, 0x54, 0x2f, + 0x53, 0x4e, 0x64, 0x64, 0x56, 0x93, 0xb0, 0xc1, 0xce, 0xa9, 0x77, 0x36, 0x49, 0x8e, 0x14, 0xbf, + 0xfe, 0xe2, 0xec, 0xaf, 0xcb, 0xf7, 0xbd, 0xc9, 0xde, 0xc2, 0xf1, 0x2d, 0x10, 0x51, 0x13, 0x27, + 0xa9, 0xd1, 0x06, 0x8f, 0x1c, 0xf9, 0x64, 0x4c, 0x0e, 0x2e, 0x7b, 0x07, 0x27, 0xeb, 0xa8, 0x12, + 0xd6, 0xf2, 0x42, 0xa4, 0xa2, 0x11, 0x48, 0x36, 0xd8, 0x75, 0x74, 0x30, 0xa2, 0x3f, 0x77, 0x81, + 0x0f, 0xce, 0x67, 0x21, 0xf8, 0xeb, 0x7c, 0x29, 0xf1, 0x8f, 0x0d, 0xf6, 0x1c, 0x36, 0x1d, 0x61, + 0x97, 0xad, 0x3e, 0x7f, 0x0c, 0x7b, 0xab, 0xab, 0x9b, 0x7d, 0x04, 0xb6, 0x39, 0x58, 0xf6, 0x12, + 0x7c, 0x17, 0x90, 0x58, 0x8c, 0x5c, 0xf7, 0x48, 0xbc, 0xe4, 0x2e, 0x6b, 0x16, 0xc2, 0xe1, 0xad, + 0x09, 0xb2, 0xa7, 0xb0, 0x7f, 0x25, 0x72, 0x69, 0xa5, 0x46, 0x47, 0xee, 0x27, 0xfd, 0x7e, 0xf6, + 0x0c, 0xfc, 0x3b, 0x46, 0xc2, 0xa6, 0x30, 0xf9, 0x5b, 0x59, 0x97, 0x9e, 0x24, 0xed, 0x72, 0xde, + 0xc0, 0x89, 0xd4, 0xf7, 0x0f, 0x76, 0x3e, 0x1d, 0xbd, 0xed, 0x45, 0x6b, 0x2d, 0xbc, 0x9f, 0xf3, + 0x42, 0xd2, 0xef, 0x3a, 0x8b, 0x72, 0xad, 0xe2, 0x8e, 0x0a, 0x25, 0x5a, 0x32, 0xb5, 0x12, 0xd8, + 0xdd, 0x78, 0x3c, 0x14, 0x86, 0xdd, 0xef, 0x2a, 0x04, 0x86, 0xc5, 0xf0, 0xc9, 0xb2, 0x5d, 0x27, + 0xbf, 0xbe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x98, 0xa1, 0x9e, 0x88, 0x03, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE deleted file mode 100644 index 23a0ada2fb..0000000000 --- a/vendor/github.com/coreos/go-semver/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2018 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). 
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go index 76cf4852c7..110fc23e15 100644 --- a/vendor/github.com/coreos/go-semver/semver/semver.go +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -19,7 +19,6 @@ import ( "bytes" "errors" "fmt" - "regexp" "strconv" "strings" ) @@ -77,14 +76,6 @@ func (v *Version) Set(version string) error { return fmt.Errorf("%s is not in dotted-tri format", version) } - if err := validateIdentifier(string(preRelease)); err != nil { - return fmt.Errorf("failed to validate pre-release: %v", err) - } - - if err := validateIdentifier(metadata); err != nil { - return fmt.Errorf("failed to validate metadata: %v", err) - } - parsed := make([]int64, 3, 3) for i, v := range dotParts[:3] { @@ -233,13 +224,6 @@ func recursivePreReleaseCompare(versionA []string, versionB []string) int { bInt = true } - // Numeric identifiers always have lower precedence than non-numeric identifiers. - if aInt && !bInt { - return -1 - } else if !aInt && bInt { - return 1 - } - // Handle Integer Comparison if aInt && bInt { if aI > bI { @@ -282,15 +266,3 @@ func (v *Version) BumpPatch() { v.PreRelease = PreRelease("") v.Metadata = "" } - -// validateIdentifier makes sure the provided identifier satisfies semver spec -func validateIdentifier(id string) error { - if id != "" && !reIdentifier.MatchString(id) { - return fmt.Errorf("%s is not a valid semver identifier", id) - } - return nil -} - -// reIdentifier is a regular expression used to check that pre-release and metadata -// identifiers satisfy the spec requirements -var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-systemd/unit/deserialize.go b/vendor/github.com/coreos/go-systemd/unit/deserialize.go index c0c06bdfc2..bf97280521 100644 --- a/vendor/github.com/coreos/go-systemd/unit/deserialize.go +++ b/vendor/github.com/coreos/go-systemd/unit/deserialize.go @@ -88,7 +88,7 @@ func (l *lexer) lex() { l.errchan <- err return } - if !bytes.ContainsAny(line, SYSTEMD_NEWLINE) { + if bytes.IndexAny(line, SYSTEMD_NEWLINE) == -1 { l.errchan <- ErrLineTooLong return } diff --git a/vendor/github.com/go-logr/zapr/Gopkg.lock b/vendor/github.com/go-logr/zapr/Gopkg.lock index 8da0a8f76c..4e0d08ecaa 100644 --- a/vendor/github.com/go-logr/zapr/Gopkg.lock +++ b/vendor/github.com/go-logr/zapr/Gopkg.lock @@ -2,31 +2,24 @@ [[projects]] - digest = "1:edd2fa4578eb086265db78a9201d15e76b298dfd0d5c379da83e9c61712cf6df" + branch = "master" name = "github.com/go-logr/logr" packages = ["."] - pruneopts = "UT" revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" - version = "v0.1.0" [[projects]] - digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" name = "go.uber.org/atomic" packages = ["."] - pruneopts = "UT" revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" version = "v1.3.2" [[projects]] - digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" name = "go.uber.org/multierr" packages = ["."] - pruneopts = "UT" revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" version = "v1.1.0" [[projects]] - digest = "1:9580b1b079114140ade8cec957685344d14f00119e0241f6b369633cb346eeb3" name = "go.uber.org/zap" packages = [ ".", @@ -34,19 +27,14 @@ "internal/bufferpool", "internal/color", "internal/exit", - "zapcore", + "zapcore" ] - pruneopts = "UT" revision = "eeedf312bc6c57391d84767a4cd413f02a917974" version = 
"v1.8.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - input-imports = [ - "github.com/go-logr/logr", - "go.uber.org/zap", - "go.uber.org/zap/zapcore", - ] + inputs-digest = "9b4b2f75bc457ddc6ebb276c32fc8e30525b6133ee76886c804ba0a6b815abc2" solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/go-logr/zapr/Gopkg.toml b/vendor/github.com/go-logr/zapr/Gopkg.toml index ae475d72e0..78944774cd 100644 --- a/vendor/github.com/go-logr/zapr/Gopkg.toml +++ b/vendor/github.com/go-logr/zapr/Gopkg.toml @@ -26,8 +26,8 @@ [[constraint]] + branch = "master" name = "github.com/go-logr/logr" - version = "0.1.0" [[constraint]] name = "go.uber.org/zap" diff --git a/vendor/github.com/gobuffalo/envy/env b/vendor/github.com/gobuffalo/envy/.env similarity index 100% rename from vendor/github.com/gobuffalo/envy/env rename to vendor/github.com/gobuffalo/envy/.env diff --git a/vendor/github.com/gobuffalo/envy/.gitignore b/vendor/github.com/gobuffalo/envy/.gitignore index 05bc384b5e..3689718594 100644 --- a/vendor/github.com/gobuffalo/envy/.gitignore +++ b/vendor/github.com/gobuffalo/envy/.gitignore @@ -27,4 +27,3 @@ generated/ bin/* gin-bin .idea/ -.env diff --git a/vendor/github.com/gobuffalo/envy/.travis.yml b/vendor/github.com/gobuffalo/envy/.travis.yml new file mode 100644 index 0000000000..1fb041a259 --- /dev/null +++ b/vendor/github.com/gobuffalo/envy/.travis.yml @@ -0,0 +1,36 @@ +language: go + +sudo: false + +matrix: + include: + - os: linux + go: "1.9.x" + - os: windows + go: "1.9.x" + - os: linux + go: "1.10.x" + - os: windows + go: "1.10.x" + - os: linux + go: "1.11.x" + env: + - GO111MODULE=off + - os: windows + go: "1.11.x" + env: + - GO111MODULE=off + - os: linux + go: "1.11.x" + env: + - GO111MODULE=on + - os: windows + go: "1.11.x" + env: + - GO111MODULE=on + +install: false + +script: + - go get -v -t ./... + - go test -v -timeout=5s -race ./... diff --git a/vendor/github.com/gobuffalo/envy/Makefile b/vendor/github.com/gobuffalo/envy/Makefile index 46aece8ff3..b0db1c4f88 100644 --- a/vendor/github.com/gobuffalo/envy/Makefile +++ b/vendor/github.com/gobuffalo/envy/Makefile @@ -2,23 +2,23 @@ TAGS ?= "sqlite" GO_BIN ?= go install: - packr2 + packr $(GO_BIN) install -v . deps: $(GO_BIN) get github.com/gobuffalo/release - $(GO_BIN) get github.com/gobuffalo/packr/v2/packr2 + $(GO_BIN) get github.com/gobuffalo/packr/packr $(GO_BIN) get -tags ${TAGS} -t ./... ifeq ($(GO111MODULE),on) $(GO_BIN) mod tidy endif build: - packr2 + packr $(GO_BIN) build -v . test: - packr2 + packr $(GO_BIN) test -tags ${TAGS} ./... ci-test: @@ -32,7 +32,7 @@ update: ifeq ($(GO111MODULE),on) $(GO_BIN) mod tidy endif - packr2 + packr make test make install ifeq ($(GO111MODULE),on) diff --git a/vendor/github.com/gobuffalo/envy/SHOULDERS.md b/vendor/github.com/gobuffalo/envy/SHOULDERS.md deleted file mode 100644 index 2384f72f4b..0000000000 --- a/vendor/github.com/gobuffalo/envy/SHOULDERS.md +++ /dev/null @@ -1,14 +0,0 @@ -# github.com/gobuffalo/envy Stands on the Shoulders of Giants - -github.com/gobuffalo/envy does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants, this project would not be possible. Please make sure to check them out and thank them for all of their hard work. 
- -Thank you to the following **GIANTS**: - - -* [github.com/davecgh/go-spew](https://godoc.org/github.com/davecgh/go-spew) - -* [github.com/joho/godotenv](https://godoc.org/github.com/joho/godotenv) - -* [github.com/rogpeppe/go-internal](https://godoc.org/github.com/rogpeppe/go-internal) - -* [github.com/stretchr/testify](https://godoc.org/github.com/stretchr/testify) diff --git a/vendor/github.com/gobuffalo/envy/azure-pipelines.yml b/vendor/github.com/gobuffalo/envy/azure-pipelines.yml deleted file mode 100644 index 144c4a2094..0000000000 --- a/vendor/github.com/gobuffalo/envy/azure-pipelines.yml +++ /dev/null @@ -1,59 +0,0 @@ -variables: - GOBIN: "$(GOPATH)/bin" # Go binaries path - GOPATH: "$(system.defaultWorkingDirectory)/gopath" # Go workspace path - modulePath: "$(GOPATH)/src/github.com/$(build.repository.name)" # Path to the module"s code - -jobs: -- job: Windows - pool: - vmImage: "vs2017-win2016" - strategy: - matrix: - go 1.9: - go_version: "1.9" - go 1.10: - go_version: "1.10" - go 1.11 (on): - go_version: "1.11" - GO111MODULE: "on" - go 1.11 (off): - go_version: "1.11" - GO111MODULE: "off" - steps: - - template: azure-tests.yml - -- job: macOS - pool: - vmImage: "macOS-10.13" - strategy: - matrix: - go 1.9: - go_version: "1.9" - go 1.10: - go_version: "1.10" - go 1.11 (on): - go_version: "1.11" - GO111MODULE: "on" - go 1.11 (off): - go_version: "1.11" - GO111MODULE: "off" - steps: - - template: azure-tests.yml - -- job: Linux - pool: - vmImage: "ubuntu-16.04" - strategy: - matrix: - go 1.9: - go_version: "1.9" - go 1.10: - go_version: "1.10" - go 1.11 (on): - go_version: "1.11" - GO111MODULE: "on" - go 1.11 (off): - go_version: "1.11" - GO111MODULE: "off" - steps: - - template: azure-tests.yml diff --git a/vendor/github.com/gobuffalo/envy/azure-tests.yml b/vendor/github.com/gobuffalo/envy/azure-tests.yml deleted file mode 100644 index eea5822fad..0000000000 --- a/vendor/github.com/gobuffalo/envy/azure-tests.yml +++ /dev/null @@ -1,19 +0,0 @@ -steps: - - task: GoTool@0 - inputs: - version: $(go_version) - - task: Bash@3 - inputs: - targetType: inline - script: | - mkdir -p "$(GOBIN)" - mkdir -p "$(GOPATH)/pkg" - mkdir -p "$(modulePath)" - shopt -s extglob - mv !(gopath) "$(modulePath)" - displayName: "Setup Go Workspace" - - script: | - go get -t -v ./... - go test -race ./... - workingDirectory: "$(modulePath)" - displayName: "Tests" diff --git a/vendor/github.com/gobuffalo/envy/azure.sh b/vendor/github.com/gobuffalo/envy/azure.sh deleted file mode 100644 index f70949796f..0000000000 --- a/vendor/github.com/gobuffalo/envy/azure.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set -xe - -cat >> .env << EOF -# This is a comment -# We can use equal or colon notation -DIR: root -FLAVOUR: none -INSIDE_FOLDER=false -EOF diff --git a/vendor/github.com/gobuffalo/envy/envy.go b/vendor/github.com/gobuffalo/envy/envy.go index dc31ba2c0c..a5a7a3c6fc 100644 --- a/vendor/github.com/gobuffalo/envy/envy.go +++ b/vendor/github.com/gobuffalo/envy/envy.go @@ -68,16 +68,8 @@ func loadEnv() { } } -// Mods returns true if module support is enabled, false otherwise -// See https://github.com/golang/go/wiki/Modules#how-to-install-and-activate-module-support for details func Mods() bool { - go111 := Get(GO111MODULE, "") - - if !InGoPath() { - return go111 != "off" - } - - return go111 == "on" + return Get(GO111MODULE, "off") == "on" } // Reload the ENV variables. 
Useful if @@ -176,7 +168,7 @@ func Map() map[string]string { for k, v := range env { cp[k] = v } - return cp + return env } // Temp makes a copy of the values and allows operation on @@ -203,16 +195,6 @@ func GoBin() string { return Get("GO_BIN", "go") } -func InGoPath() bool { - pwd, _ := os.Getwd() - for _, p := range GoPaths() { - if strings.HasPrefix(pwd, p) { - return true - } - } - return false -} - // GoPaths returns all possible GOPATHS that are set. func GoPaths() []string { gp := Get("GOPATH", "") diff --git a/vendor/github.com/gobuffalo/envy/go.mod b/vendor/github.com/gobuffalo/envy/go.mod index d951b7ce1c..f10a493794 100644 --- a/vendor/github.com/gobuffalo/envy/go.mod +++ b/vendor/github.com/gobuffalo/envy/go.mod @@ -1,7 +1,8 @@ module github.com/gobuffalo/envy require ( - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/gobuffalo/packr/v2 v2.0.0-rc.14 + github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/joho/godotenv v1.3.0 github.com/rogpeppe/go-internal v1.1.0 github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/gobuffalo/envy/go.sum b/vendor/github.com/gobuffalo/envy/go.sum index f11ef4ce58..172ff9ae8c 100644 --- a/vendor/github.com/gobuffalo/envy/go.sum +++ b/vendor/github.com/gobuffalo/envy/go.sum @@ -1,17 +1,383 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-sql-driver/mysql 
v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY= +github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4= +github.com/gobuffalo/buffalo-plugins v1.0.2/go.mod h1:pOp/uF7X3IShFHyobahTkTLZaeUXwb0GrUTb9ngJWTs= +github.com/gobuffalo/buffalo-plugins v1.0.4/go.mod h1:pWS1vjtQ6uD17MVFWf7i3zfThrEKWlI5+PYLw/NaDB4= +github.com/gobuffalo/buffalo-plugins v1.4.3/go.mod h1:uCzTY0woez4nDMdQjkcOYKanngeUVRO2HZi7ezmAjWY= +github.com/gobuffalo/buffalo-plugins v1.5.1/go.mod h1:jbmwSZK5+PiAP9cC09VQOrGMZFCa/P0UMlIS3O12r5w= +github.com/gobuffalo/buffalo-plugins v1.6.4/go.mod h1:/+N1aophkA2jZ1ifB2O3Y9yGwu6gKOVMtUmJnbg+OZI= +github.com/gobuffalo/buffalo-plugins v1.6.5/go.mod h1:0HVkbgrVs/MnPZ/FOseDMVanCTm2RNcdM0PuXcL1NNI= +github.com/gobuffalo/buffalo-plugins v1.6.7/go.mod h1:ZGZRkzz2PiKWHs0z7QsPBOTo2EpcGRArMEym6ghKYgk= +github.com/gobuffalo/buffalo-plugins v1.6.9/go.mod h1:yYlYTrPdMCz+6/+UaXg5Jm4gN3xhsvsQ2ygVatZV5vw= +github.com/gobuffalo/buffalo-plugins v1.6.11/go.mod h1:eAA6xJIL8OuynJZ8amXjRmHND6YiusVAaJdHDN1Lu8Q= +github.com/gobuffalo/buffalo-plugins v1.8.2/go.mod h1:9te6/VjEQ7pKp7lXlDIMqzxgGpjlKoAcAANdCgoR960= +github.com/gobuffalo/buffalo-plugins v1.8.3/go.mod h1:IAWq6vjZJVXebIq2qGTLOdlXzmpyTZ5iJG5b59fza5U= +github.com/gobuffalo/buffalo-plugins v1.9.4/go.mod h1:grCV6DGsQlVzQwk6XdgcL3ZPgLm9BVxlBmXPMF8oBHI= +github.com/gobuffalo/buffalo-plugins v1.10.0 h1:oF+BJ20zenp31xJnjkFcXAMUqlrMMTicsD/UwQWCW/s= +github.com/gobuffalo/buffalo-plugins v1.10.0/go.mod h1:4osg8d9s60txLuGwXnqH+RCjPHj9K466cDFRl3PErHI= +github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8= +github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc= +github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.7/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.8/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.9/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.10/go.mod h1:X0CFllQjTV5ogsnUrg+Oks2yTI+PU2dGYBJOEI2D1Uo= +github.com/gobuffalo/envy v1.6.11/go.mod h1:Fiq52W7nrHGDggFPhn2ZCcHw4u/rqXkqo+i7FB6EAcg= +github.com/gobuffalo/events v1.0.3/go.mod h1:Txo8WmqScapa7zimEQIwgiJBvMECMe9gJjsKNPN3uZw= +github.com/gobuffalo/events v1.0.7/go.mod h1:z8txf6H9jWhQ5Scr7YPLWg/cgXBRj8Q4uYI+rsVCCSQ= +github.com/gobuffalo/events v1.0.8/go.mod h1:A5KyqT1sA+3GJiBE4QKZibse9mtOcI9nw8gGrDdqYGs= +github.com/gobuffalo/events v1.1.3/go.mod h1:9yPGWYv11GENtzrIRApwQRMYSbUgCsZ1w6R503fCfrk= +github.com/gobuffalo/events v1.1.4/go.mod h1:09/YRRgZHEOts5Isov+g9X2xajxdvOAcUuAHIX/O//A= +github.com/gobuffalo/events v1.1.5/go.mod h1:3YUSzgHfYctSjEjLCWbkXP6djH2M+MLaVRzb4ymbAK0= +github.com/gobuffalo/events v1.1.7/go.mod h1:6fGqxH2ing5XMb3EYRq9LEkVlyPGs4oO/eLzh+S8CxY= +github.com/gobuffalo/events v1.1.8/go.mod h1:UFy+W6X6VbCWS8k2iT81HYX65dMtiuVycMy04cplt/8= +github.com/gobuffalo/events v1.1.9 h1:ukq5ys/h0TuiX7eLJyZBD1dJOy0r19JTEYmgXKG9j+Y= +github.com/gobuffalo/events v1.1.9/go.mod 
h1:/0nf8lMtP5TkgNbzYxR6Bl4GzBy5s5TebgNTdRfRbPM= +github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc= +github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181018182602-fd24a256709f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181019110701-3d6f0b585514/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181024204909-8f6be1a8c6c2/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181104133451-1f6e9779237a/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI= +github.com/gobuffalo/flect v0.0.0-20181210151238-24a2b68e0316/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= +github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2 h1:51NF9n6h4nGMooU5ALB+uJCM0UOmcWjAegIZb/ePtoE= +github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= +github.com/gobuffalo/genny v0.0.0-20180924032338-7af3a40f2252/go.mod h1:tUTQOogrr7tAQnhajMSH6rv1BVev34H2sa1xNHMy94g= +github.com/gobuffalo/genny v0.0.0-20181003150629-3786a0744c5d/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= +github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= +github.com/gobuffalo/genny v0.0.0-20181007153042-b8de7d566757/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181012161047-33e5f43d83a6/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181017160347-90a774534246/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181024195656-51392254bf53/go.mod h1:o9GEH5gn5sCKLVB5rHFC4tq40rQ3VRUzmx6WwmaqISE= +github.com/gobuffalo/genny v0.0.0-20181025145300-af3f81d526b8/go.mod h1:uZ1fFYvdcP8mu0B/Ynarf6dsGvp7QFIpk/QACUuFUVI= +github.com/gobuffalo/genny v0.0.0-20181027191429-94d6cfb5c7fc/go.mod h1:x7SkrQQBx204Y+O9EwRXeszLJDTaWN0GnEasxgLrQTA= +github.com/gobuffalo/genny v0.0.0-20181027195209-3887b7171c4f/go.mod h1:JbKx8HSWICu5zyqWOa0dVV1pbbXOHusrSzQUprW6g+w= +github.com/gobuffalo/genny v0.0.0-20181106193839-7dcb0924caf1/go.mod h1:x61yHxvbDCgQ/7cOAbJCacZQuHgB0KMSzoYcw5debjU= +github.com/gobuffalo/genny v0.0.0-20181107223128-f18346459dbe/go.mod h1:utQD3aKKEsdb03oR+Vi/6ztQb1j7pO10N3OBoowRcSU= +github.com/gobuffalo/genny v0.0.0-20181114215459-0a4decd77f5d/go.mod h1:kN2KZ8VgXF9VIIOj/GM0Eo7YK+un4Q3tTreKOf0q1ng= +github.com/gobuffalo/genny v0.0.0-20181119162812-e8ff4adce8bb/go.mod h1:BA9htSe4bZwBDJLe8CUkoqkypq3hn3+CkoHqVOW718E= +github.com/gobuffalo/genny v0.0.0-20181127225641-2d959acc795b/go.mod h1:l54xLXNkteX/PdZ+HlgPk1qtcrgeOr3XUBBPDbH+7CQ= +github.com/gobuffalo/genny v0.0.0-20181128191930-77e34f71ba2a/go.mod h1:FW/D9p7cEEOqxYA71/hnrkOWm62JZ5ZNxcNIVJEaWBU= +github.com/gobuffalo/genny v0.0.0-20181203165245-fda8bcce96b1/go.mod 
h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181203201232-849d2c9534ea/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181206121324-d6fb8a0dbe36/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181207164119-84844398a37d/go.mod h1:y0ysCHGGQf2T3vOhCrGHheYN54Y/REj0ayd0Suf4C/8= +github.com/gobuffalo/genny v0.0.0-20181211165820-e26c8466f14d/go.mod h1:sHnK+ZSU4e2feXP3PA29ouij6PUEiN+RCwECjCTB3yM= +github.com/gobuffalo/genny v0.0.0-20190104222617-a71664fc38e7/go.mod h1:QPsQ1FnhEsiU8f+O0qKWXz2RE4TiDqLVChWkBuh1WaY= +github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5 h1:boQS3dA9PxhyufJEWIILrG6pJQbDnpwP2rFyvWacdoY= +github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5/go.mod h1:CIaHCrSIuJ4il6ka3Hub4DR4adDrGoXGEEt2FbBxoIo= +github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt3/DSalaIXbb0De/dmTqMQdkQ4I= +github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY= +github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI= +github.com/gobuffalo/httptest v1.0.2/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= +github.com/gobuffalo/licenser v0.0.0-20180924033006-eae28e638a42/go.mod h1:Ubo90Np8gpsSZqNScZZkVXXAo5DGhTb+WYFIjlnog8w= +github.com/gobuffalo/licenser v0.0.0-20181025145548-437d89de4f75/go.mod h1:x3lEpYxkRG/XtGCUNkio+6RZ/dlOvLzTI9M1auIwFcw= +github.com/gobuffalo/licenser v0.0.0-20181027200154-58051a75da95/go.mod h1:BzhaaxGd1tq1+OLKObzgdCV9kqVhbTulxOpYbvMQWS0= +github.com/gobuffalo/licenser v0.0.0-20181109171355-91a2a7aac9a7/go.mod h1:m+Ygox92pi9bdg+gVaycvqE8RVSjZp7mWw75+K5NPHk= +github.com/gobuffalo/licenser v0.0.0-20181128165715-cc7305f8abed/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE= +github.com/gobuffalo/licenser v0.0.0-20181203160806-fe900bbede07/go.mod h1:ph6VDNvOzt1CdfaWC+9XwcBnlSTBz2j49PBwum6RFaU= +github.com/gobuffalo/licenser v0.0.0-20181211173111-f8a311c51159/go.mod h1:ve/Ue99DRuvnTaLq2zKa6F4KtHiYf7W046tDjuGYPfM= +github.com/gobuffalo/logger v0.0.0-20181022175615-46cfb361fc27/go.mod h1:8sQkgyhWipz1mIctHF4jTxmJh1Vxhp7mP8IqbljgJZo= +github.com/gobuffalo/logger v0.0.0-20181027144941-73d08d2bb969/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181027193913-9cf4dd0efe46/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181109185836-3feeab578c17/go.mod h1:oNErH0xLe+utO+OW8ptXMSA5DkiSEDW1u3zGIt8F9Ew= +github.com/gobuffalo/logger v0.0.0-20181117211126-8e9b89b7c264/go.mod h1:5etB91IE0uBlw9k756fVKZJdS+7M7ejVhmpXXiSFj0I= +github.com/gobuffalo/logger v0.0.0-20181127160119-5b956e21995c/go.mod h1:+HxKANrR9VGw9yN3aOAppJKvhO05ctDi63w4mDnKv2U= +github.com/gobuffalo/makr v1.1.5/go.mod h1:Y+o0btAH1kYAMDJW/TX3+oAXEu0bmSLLoC9mIFxtzOw= +github.com/gobuffalo/mapi v1.0.0/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/meta v0.0.0-20181018155829-df62557efcd3/go.mod h1:XTTOhwMNryif3x9LkTTBO/Llrveezd71u3quLd0u7CM= +github.com/gobuffalo/meta v0.0.0-20181018192820-8c6cef77dab3/go.mod h1:E94EPzx9NERGCY69UWlcj6Hipf2uK/vnfrF4QD0plVE= +github.com/gobuffalo/meta 
v0.0.0-20181025145500-3a985a084b0a/go.mod h1:YDAKBud2FP7NZdruCSlmTmDOZbVSa6bpK7LJ/A/nlKg= +github.com/gobuffalo/meta v0.0.0-20181114191255-b130ebedd2f7/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE= +github.com/gobuffalo/meta v0.0.0-20181127070345-0d7e59dd540b/go.mod h1:RLO7tMvE0IAKAM8wny1aN12pvEKn7EtkBLkUZR00Qf8= +github.com/gobuffalo/mw-basicauth v1.0.3/go.mod h1:dg7+ilMZOKnQFHDefUzUHufNyTswVUviCBgF244C1+0= +github.com/gobuffalo/mw-contenttype v0.0.0-20180802152300-74f5a47f4d56/go.mod h1:7EvcmzBbeCvFtQm5GqF9ys6QnCxz2UM1x0moiWLq1No= +github.com/gobuffalo/mw-csrf v0.0.0-20180802151833-446ff26e108b/go.mod h1:sbGtb8DmDZuDUQoxjr8hG1ZbLtZboD9xsn6p77ppcHo= +github.com/gobuffalo/mw-forcessl v0.0.0-20180802152810-73921ae7a130/go.mod h1:JvNHRj7bYNAMUr/5XMkZaDcw3jZhUZpsmzhd//FFWmQ= +github.com/gobuffalo/mw-i18n v0.0.0-20180802152014-e3060b7e13d6/go.mod h1:91AQfukc52A6hdfIfkxzyr+kpVYDodgAeT5cjX1UIj4= +github.com/gobuffalo/mw-paramlogger v0.0.0-20181005191442-d6ee392ec72e/go.mod h1:6OJr6VwSzgJMqWMj7TYmRUqzNe2LXu/W1rRW4MAz/ME= +github.com/gobuffalo/mw-tokenauth v0.0.0-20181001105134-8545f626c189/go.mod h1:UqBF00IfKvd39ni5+yI5MLMjAf4gX7cDKN/26zDOD6c= +github.com/gobuffalo/packd v0.0.0-20181027182251-01ad393492c8/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027190505-aafc0d02c411/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027194105-7ae579e6d213/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181031195726-c82734870264/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181104210303-d376b15f8e96/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181124090624-311c6248e5fb/go.mod h1:Foenia9ZvITEvG05ab6XpiD5EfBHPL8A6hush8SJ0o8= +github.com/gobuffalo/packd v0.0.0-20181207120301-c49825f8f6f4/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= +github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687 h1:uZ+G4JprR0UEq0aHZs+6eP7TEZuFfrIkmQWejIBV/QQ= +github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= +github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24= +github.com/gobuffalo/packr v1.15.0/go.mod h1:t5gXzEhIviQwVlNx/+3SfS07GS+cZ2hn76WLzPp6MGI= +github.com/gobuffalo/packr v1.15.1/go.mod h1:IeqicJ7jm8182yrVmNbM6PR4g79SjN9tZLH8KduZZwE= +github.com/gobuffalo/packr v1.19.0/go.mod h1:MstrNkfCQhd5o+Ct4IJ0skWlxN8emOq8DsoT1G98VIU= +github.com/gobuffalo/packr v1.20.0/go.mod h1:JDytk1t2gP+my1ig7iI4NcVaXr886+N0ecUga6884zw= +github.com/gobuffalo/packr v1.21.0/go.mod h1:H00jGfj1qFKxscFJSw8wcL4hpQtPe1PfU2wa6sg/SR0= +github.com/gobuffalo/packr/v2 v2.0.0-rc.8/go.mod h1:y60QCdzwuMwO2R49fdQhsjCPv7tLQFR0ayzxxla9zes= +github.com/gobuffalo/packr/v2 v2.0.0-rc.9/go.mod h1:fQqADRfZpEsgkc7c/K7aMew3n4aF1Kji7+lIZeR98Fc= +github.com/gobuffalo/packr/v2 v2.0.0-rc.10/go.mod h1:4CWWn4I5T3v4c1OsJ55HbHlUEKNWMITG5iIkdr4Px4w= +github.com/gobuffalo/packr/v2 v2.0.0-rc.11/go.mod h1:JoieH/3h3U4UmatmV93QmqyPUdf4wVM9HELaHEu+3fk= 
+github.com/gobuffalo/packr/v2 v2.0.0-rc.12/go.mod h1:FV1zZTsVFi1DSCboO36Xgs4pzCZBjB/tDV9Cz/lSaR8= +github.com/gobuffalo/packr/v2 v2.0.0-rc.13/go.mod h1:2Mp7GhBFMdJlOK8vGfl7SYtfMP3+5roE39ejlfjw0rA= +github.com/gobuffalo/packr/v2 v2.0.0-rc.14 h1:K41dNilNHDbDgCL3UE6K02JGuN89pjvD9oG99X7Om2s= +github.com/gobuffalo/packr/v2 v2.0.0-rc.14/go.mod h1:06otbrNvDKO1eNQ3b8hst+1010UooI2MFg+B2Ze4MV8= +github.com/gobuffalo/plush v3.7.16+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.20+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.21+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.22+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.23+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.30+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.31+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.32+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plushgen v0.0.0-20181128164830-d29dcb966cb2/go.mod h1:r9QwptTFnuvSaSRjpSp4S2/4e2D3tJhARYbvEBcKSb4= +github.com/gobuffalo/plushgen v0.0.0-20181203163832-9fc4964505c2/go.mod h1:opEdT33AA2HdrIwK1aibqnTJDVVKXC02Bar/GT1YRVs= +github.com/gobuffalo/plushgen v0.0.0-20181207152837-eedb135bd51b/go.mod h1:Lcw7HQbEVm09sAQrCLzIxuhFbB3nAgp4c55E+UlynR0= +github.com/gobuffalo/plushgen v0.0.0-20190104222512-177cd2b872b3/go.mod h1:tYxCozi8X62bpZyKXYHw1ncx2ZtT2nFvG42kuLwYjoc= +github.com/gobuffalo/pop v4.8.2+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/pop v4.8.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/pop v4.8.4+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/release v1.0.35/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= +github.com/gobuffalo/release v1.0.38/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= +github.com/gobuffalo/release v1.0.42/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= +github.com/gobuffalo/release v1.0.52/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= +github.com/gobuffalo/release v1.0.53/go.mod h1:FdF257nd8rqhNaqtDWFGhxdJ/Ig4J7VcS3KL7n/a+aA= +github.com/gobuffalo/release v1.0.54/go.mod h1:Pe5/RxRa/BE8whDpGfRqSI7D1a0evGK1T4JDm339tJc= +github.com/gobuffalo/release v1.0.61/go.mod h1:mfIO38ujUNVDlBziIYqXquYfBF+8FDHUjKZgYC1Hj24= +github.com/gobuffalo/release v1.0.72/go.mod h1:NP5NXgg/IX3M5XmHmWR99D687/3Dt9qZtTK/Lbwc1hU= +github.com/gobuffalo/release v1.1.1/go.mod h1:Sluak1Xd6kcp6snkluR1jeXAogdJZpFFRzTYRs/2uwg= +github.com/gobuffalo/release v1.1.3/go.mod h1:CuXc5/m+4zuq8idoDt1l4va0AXAn/OSs08uHOfMVr8E= +github.com/gobuffalo/release v1.1.6/go.mod h1:18naWa3kBsqO0cItXZNJuefCKOENpbbUIqRL1g+p6z0= +github.com/gobuffalo/shoulders v1.0.1/go.mod h1:V33CcVmaQ4gRUmHKwq1fiTXuf8Gp/qjQBUL5tHPmvbA= +github.com/gobuffalo/syncx v0.0.0-20181120191700-98333ab04150/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobuffalo/syncx v0.0.0-20181120194010-558ac7de985f/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobuffalo/tags 
v2.0.11+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/tags v2.0.14+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/tags v2.0.15+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/uuid v2.0.3+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/uuid v2.0.4+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/uuid v2.0.5+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/validate v2.0.3+incompatible/go.mod h1:N+EtDe0J8252BgfzQUChBgfd6L93m9weay53EWFVsMM= +github.com/gobuffalo/x v0.0.0-20181003152136-452098b06085/go.mod h1:WevpGD+5YOreDJznWevcn8NTmQEW5STSBgIkpkjzqXc= +github.com/gobuffalo/x v0.0.0-20181007152206-913e47c59ca7/go.mod h1:9rDPXaB3kXdKWzMc4odGQQdG2e2DIEmANy5aSJ9yesY= +github.com/gofrs/uuid v3.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.1.2/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= +github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= +github.com/joho/godotenv v1.2.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.7.8 h1:VfG72pyIxgtC7+3X9CMHI0AOl4LwyRAg98WAgsvffi8= +github.com/karrick/godirwalk v1.7.8/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/markbates/deplist v1.0.4/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= +github.com/markbates/deplist v1.0.5/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= +github.com/markbates/going v1.0.2/go.mod h1:UWCk3zm0UKefHZ7l8BNqi26UyiEMniznk8naLdTcy6c= +github.com/markbates/grift v1.0.4/go.mod h1:wbmtW74veyx+cgfwFhlnnMWqhoz55rnHR47oMXzsyVs= +github.com/markbates/hmax v1.0.0/go.mod h1:cOkR9dktiESxIMu+65oc/r/bdY4bE8zZw3OLhLx0X2c= +github.com/markbates/inflect v1.0.0/go.mod h1:oTeZL2KHA7CUX6X+fovmK9OvIOFuqu0TwdQrZjLTh88= +github.com/markbates/inflect v1.0.1/go.mod h1:uv3UVNBe5qBIfCm8O8Q+DW+S1EopeyINj+Ikhc7rnCk= +github.com/markbates/inflect v1.0.3/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= +github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= +github.com/markbates/oncer v0.0.0-20180924031910-e862a676800b/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20180924034138-723ad0170a46/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/refresh v1.4.10/go.mod h1:NDPHvotuZmTmesXxr95C9bjlw1/0frJwtME2dzcVKhc= +github.com/markbates/safe v1.0.0/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/markbates/sigtx v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc= +github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/monoculum/formam v0.0.0-20180901015400-4e68be1d79ba/go.mod h1:RKgILGEJq24YyJ2ban8EO0RUVSJlF1pGsEvoLEACr/Q= +github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
+github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.0.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.1.0 h1:g0fH8RicVgNl+zVZDCDfbdWxAWoAEJyI7I3TZYXFiig= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20170515013102-78fb10f4a5f8/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/octicon v0.0.0-20180602230221-c42b0e3b24d9/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.1.0/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= +github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman 
v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.2.1/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI= +github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181024171144-74cb1d3d52f4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc h1:F5tKCVGp+MUAHhKp5MZtGqAlGX3+oCsiL1Q629FL90M= +golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181017193950-04a2e542c03f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180921163948-d47a0f339242/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180927150500-dad3d9fb7b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181005133103-4497e2df6f9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181011152604-fa43e7bc11ba/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181022134430-8a28ead16f52/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181024145615-5cd93ef61a7c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026064943-731415f00dce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190102155601-82a175fd1598 h1:S8GOgffXV1X3fpVG442QRfWOt0iFl79eHJ7OPt725bo= +golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181003024731-2f84ea8ef872/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181013182035-5e66757b835f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181017214349-06f26fdaaa28/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181024171208-a2dc47679d30/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181026183834-f60e5f99f081/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181107215632-34b416bd17b3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181114190951-94339b83286c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181119130350-139d099f6620/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127195227-b4e97c0ed882/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181203210056-e5f3ab76ea4b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181205224935-3576414c54a4/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181206194817-bcd4e47d0288/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181207183836-8bc39b988060/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181212172921-837e80568c09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190102213336-ca9055ed7d04/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190104182027-498d95493402/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190111214448-fc1d57b08d7b h1:Z5QW7z0ycYrOVRYv3z4FeSZbRNvVwUfXHKQSZKb5A6w= +golang.org/x/tools v0.0.0-20190111214448-fc1d57b08d7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= +gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod 
h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/gobuffalo/envy/shoulders.md b/vendor/github.com/gobuffalo/envy/shoulders.md new file mode 100644 index 0000000000..120330be34 --- /dev/null +++ b/vendor/github.com/gobuffalo/envy/shoulders.md @@ -0,0 +1,16 @@ +# github.com/gobuffalo/envy Stands on the Shoulders of Giants + +github.com/gobuffalo/envy does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants this project would not be possible. Please make sure to check them out and thank them for all of their hard work. + +Thank you to the following **GIANTS**: + + +* [github.com/gobuffalo/envy](https://godoc.org/github.com/gobuffalo/envy) + +* [github.com/joho/godotenv](https://godoc.org/github.com/joho/godotenv) + +* [github.com/rogpeppe/go-internal/modfile](https://godoc.org/github.com/rogpeppe/go-internal/modfile) + +* [github.com/rogpeppe/go-internal/module](https://godoc.org/github.com/rogpeppe/go-internal/module) + +* [github.com/rogpeppe/go-internal/semver](https://godoc.org/github.com/rogpeppe/go-internal/semver) diff --git a/vendor/github.com/gobuffalo/envy/version.go b/vendor/github.com/gobuffalo/envy/version.go index b1623aef72..f0de312463 100644 --- a/vendor/github.com/gobuffalo/envy/version.go +++ b/vendor/github.com/gobuffalo/envy/version.go @@ -1,3 +1,3 @@ package envy -const Version = "v1.7.0" +const Version = "v1.6.12" diff --git a/vendor/github.com/markbates/inflect/.gitignore b/vendor/github.com/gobuffalo/flect/.gitignore similarity index 100% rename from vendor/github.com/markbates/inflect/.gitignore rename to vendor/github.com/gobuffalo/flect/.gitignore diff --git a/vendor/github.com/gobuffalo/flect/.travis.yml b/vendor/github.com/gobuffalo/flect/.travis.yml new file mode 100644 index 0000000000..1fb041a259 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/.travis.yml @@ -0,0 +1,36 @@ +language: go + +sudo: false + +matrix: + include: + - os: linux + go: "1.9.x" + - os: windows + go: "1.9.x" + - os: linux + go: "1.10.x" + - os: windows + go: "1.10.x" + - os: linux + go: "1.11.x" + env: + - GO111MODULE=off + - os: windows + go: "1.11.x" + env: + - GO111MODULE=off + - os: linux + go: "1.11.x" + env: + - GO111MODULE=on + - os: windows + go: "1.11.x" + env: + - GO111MODULE=on + +install: false + +script: + - go get -v -t ./... + - go test -v -timeout=5s -race ./... 
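The hunks above and below vendor `github.com/gobuffalo/flect` in place of `github.com/markbates/inflect`. As a minimal sketch (not part of the diff), this is how the inflection helpers added in the vendored files below are typically called; the inputs and expected outputs are taken from the doc comments in `camelize.go`, `dasherize.go`, and `capitalize.go`:

```go
package main

import (
	"fmt"

	"github.com/gobuffalo/flect"
)

func main() {
	// Examples mirror the doc comments in the vendored flect sources.
	fmt.Println(flect.Camelize("widget_id"))        // widgetID
	fmt.Println(flect.Dasherize("Donald E. Knuth")) // donald-e-knuth
	fmt.Println(flect.Capitalize("bob dylan"))      // Bob dylan
}
```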
diff --git a/vendor/github.com/markbates/inflect/LICENCE b/vendor/github.com/gobuffalo/flect/LICENSE.txt similarity index 95% rename from vendor/github.com/markbates/inflect/LICENCE rename to vendor/github.com/gobuffalo/flect/LICENSE.txt index 8a36b944a5..123ddc0d80 100644 --- a/vendor/github.com/markbates/inflect/LICENCE +++ b/vendor/github.com/gobuffalo/flect/LICENSE.txt @@ -1,4 +1,5 @@ -Copyright (c) 2011 Chris Farmiloe +The MIT License (MIT) +Copyright (c) 2018 Mark Bates Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/vendor/github.com/markbates/inflect/Makefile b/vendor/github.com/gobuffalo/flect/Makefile similarity index 75% rename from vendor/github.com/markbates/inflect/Makefile rename to vendor/github.com/gobuffalo/flect/Makefile index e0e2f3baa4..b0db1c4f88 100644 --- a/vendor/github.com/markbates/inflect/Makefile +++ b/vendor/github.com/gobuffalo/flect/Makefile @@ -3,53 +3,44 @@ GO_BIN ?= go install: packr - $(GO_BIN) install -tags ${TAGS} -v . - make tidy - -tidy: -ifeq ($(GO111MODULE),on) - $(GO_BIN) mod tidy -else - echo skipping go mod tidy -endif + $(GO_BIN) install -v . deps: $(GO_BIN) get github.com/gobuffalo/release $(GO_BIN) get github.com/gobuffalo/packr/packr $(GO_BIN) get -tags ${TAGS} -t ./... - make tidy +ifeq ($(GO111MODULE),on) + $(GO_BIN) mod tidy +endif build: packr $(GO_BIN) build -v . - make tidy test: packr $(GO_BIN) test -tags ${TAGS} ./... - make tidy ci-test: $(GO_BIN) test -tags ${TAGS} -race ./... - make tidy lint: gometalinter --vendor ./... --deadline=1m --skip=internal - make tidy update: $(GO_BIN) get -u -tags ${TAGS} - make tidy +ifeq ($(GO111MODULE),on) + $(GO_BIN) mod tidy +endif packr make test make install - make tidy +ifeq ($(GO111MODULE),on) + $(GO_BIN) mod tidy +endif release-test: $(GO_BIN) test -tags ${TAGS} -race ./... - make tidy release: - make tidy release -y -f version.go - make tidy diff --git a/vendor/github.com/gobuffalo/flect/README.md b/vendor/github.com/gobuffalo/flect/README.md new file mode 100644 index 0000000000..af0afbda98 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/README.md @@ -0,0 +1,36 @@ +# Flect + +

+[GoDoc] [Build Status] [Go Report Card]

+ +This is a new inflection engine to replace [https://github.com/markbates/inflect](https://github.com/markbates/inflect) designed to be more modular, more readable, and easier to fix issues on than the original. + +## Installation + +```bash +$ go get -u -v github.com/gobuffalo/flect +``` + +## `github.com/gobuffalo/flect` +GoDoc + +The `github.com/gobuffalo/flect` package contains "basic" inflection tools, like pluralization, singularization, etc... + +### The `Ident` Type + +In addition to helpful methods that take in a `string` and return a `string`, there is an `Ident` type that can be used to create new, custom, inflection rules. + +The `Ident` type contains two fields. + +* `Original` - This is the original `string` that was used to create the `Ident` +* `Parts` - This is a `[]string` that represents all of the "parts" of the string, that have been split apart, making the segments easier to work with + +Examples of creating new inflection rules using `Ident` can be found in the `github.com/gobuffalo/flect/name` package. + +## `github.com/gobuffalo/flect/name` +GoDoc + +The `github.com/gobuffalo/flect/name` package contains more "business" inflection rules like creating proper names, table names, etc... diff --git a/vendor/github.com/gobuffalo/flect/acronyms.go b/vendor/github.com/gobuffalo/flect/acronyms.go new file mode 100644 index 0000000000..b169724a4c --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/acronyms.go @@ -0,0 +1,152 @@ +package flect + +import "sync" + +var acronymsMoot = &sync.RWMutex{} + +var baseAcronyms = map[string]bool{ + "OK": true, + "UTF8": true, + "HTML": true, + "JSON": true, + "JWT": true, + "ID": true, + "UUID": true, + "SQL": true, + "ACK": true, + "ACL": true, + "ADSL": true, + "AES": true, + "ANSI": true, + "API": true, + "ARP": true, + "ATM": true, + "BGP": true, + "BSS": true, + "CCITT": true, + "CHAP": true, + "CIDR": true, + "CIR": true, + "CLI": true, + "CPE": true, + "CPU": true, + "CRC": true, + "CRT": true, + "CSMA": true, + "CMOS": true, + "DCE": true, + "DEC": true, + "DES": true, + "DHCP": true, + "DNS": true, + "DRAM": true, + "DSL": true, + "DSLAM": true, + "DTE": true, + "DMI": true, + "EHA": true, + "EIA": true, + "EIGRP": true, + "EOF": true, + "ESS": true, + "FCC": true, + "FCS": true, + "FDDI": true, + "FTP": true, + "GBIC": true, + "gbps": true, + "GEPOF": true, + "HDLC": true, + "HTTP": true, + "HTTPS": true, + "IANA": true, + "ICMP": true, + "IDF": true, + "IDS": true, + "IEEE": true, + "IETF": true, + "IMAP": true, + "IP": true, + "IPS": true, + "ISDN": true, + "ISP": true, + "kbps": true, + "LACP": true, + "LAN": true, + "LAPB": true, + "LAPF": true, + "LLC": true, + "MAC": true, + "Mbps": true, + "MC": true, + "MDF": true, + "MIB": true, + "MoCA": true, + "MPLS": true, + "MTU": true, + "NAC": true, + "NAT": true, + "NBMA": true, + "NIC": true, + "NRZ": true, + "NRZI": true, + "NVRAM": true, + "OSI": true, + "OSPF": true, + "OUI": true, + "PAP": true, + "PAT": true, + "PC": true, + "PIM": true, + "PCM": true, + "PDU": true, + "POP3": true, + "POTS": true, + "PPP": true, + "PPTP": true, + "PTT": true, + "PVST": true, + "RAM": true, + "RARP": true, + "RFC": true, + "RIP": true, + "RLL": true, + "ROM": true, + "RSTP": true, + "RTP": true, + "RCP": true, + "SDLC": true, + "SFD": true, + "SFP": true, + "SLARP": true, + "SLIP": true, + "SMTP": true, + "SNA": true, + "SNAP": true, + "SNMP": true, + "SOF": true, + "SRAM": true, + "SSH": true, + "SSID": true, + "STP": true, + 
"SYN": true, + "TDM": true, + "TFTP": true, + "TIA": true, + "TOFU": true, + "UDP": true, + "URL": true, + "URI": true, + "USB": true, + "UTP": true, + "VC": true, + "VLAN": true, + "VLSM": true, + "VPN": true, + "W3C": true, + "WAN": true, + "WEP": true, + "WiFi": true, + "WPA": true, + "WWW": true, +} diff --git a/vendor/github.com/gobuffalo/flect/camelize.go b/vendor/github.com/gobuffalo/flect/camelize.go new file mode 100644 index 0000000000..8a9928e8be --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/camelize.go @@ -0,0 +1,48 @@ +package flect + +import ( + "strings" + "unicode" +) + +// Camelize returns a camelize version of a string +// bob dylan = bobDylan +// widget_id = widgetID +// WidgetID = widgetID +func Camelize(s string) string { + return New(s).Camelize().String() +} + +// Camelize returns a camelize version of a string +// bob dylan = bobDylan +// widget_id = widgetID +// WidgetID = widgetID +func (i Ident) Camelize() Ident { + var out []string + for i, part := range i.Parts { + var x string + var capped bool + if strings.ToLower(part) == "id" { + out = append(out, "ID") + continue + } + for _, c := range part { + if unicode.IsLetter(c) || unicode.IsDigit(c) { + if i == 0 { + x += string(unicode.ToLower(c)) + continue + } + if !capped { + capped = true + x += string(unicode.ToUpper(c)) + continue + } + x += string(c) + } + } + if x != "" { + out = append(out, x) + } + } + return New(strings.Join(out, "")) +} diff --git a/vendor/github.com/gobuffalo/flect/capitalize.go b/vendor/github.com/gobuffalo/flect/capitalize.go new file mode 100644 index 0000000000..42ecc166cb --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/capitalize.go @@ -0,0 +1,27 @@ +package flect + +import "unicode" + +// Capitalize will cap the first letter of string +// user = User +// bob dylan = Bob dylan +// widget_id = Widget_id +func Capitalize(s string) string { + return New(s).Capitalize().String() +} + +// Capitalize will cap the first letter of string +// user = User +// bob dylan = Bob dylan +// widget_id = Widget_id +func (i Ident) Capitalize() Ident { + var x string + if len(i.Parts) == 0 { + return New("") + } + x = string(unicode.ToTitle(rune(i.Original[0]))) + if len(i.Original) > 1 { + x += i.Original[1:] + } + return New(x) +} diff --git a/vendor/github.com/gobuffalo/flect/custom_data.go b/vendor/github.com/gobuffalo/flect/custom_data.go new file mode 100644 index 0000000000..12d3f9b470 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/custom_data.go @@ -0,0 +1,82 @@ +package flect + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/gobuffalo/envy" +) + +func init() { + loadCustomData("inflections.json", "INFLECT_PATH", "could not read inflection file", LoadInflections) + loadCustomData("acronyms.json", "ACRONYMS_PATH", "could not read acronyms file", LoadAcronyms) +} + +//CustomDataParser are functions that parse data like acronyms or +//plurals in the shape of a io.Reader it receives. 
+type CustomDataParser func(io.Reader) error + +func loadCustomData(defaultFile, env, readErrorMessage string, parser CustomDataParser) { + pwd, _ := os.Getwd() + path := envy.Get(env, filepath.Join(pwd, defaultFile)) + + if _, err := os.Stat(path); err != nil { + return + } + + b, err := ioutil.ReadFile(path) + if err != nil { + fmt.Printf("%s %s (%s)\n", readErrorMessage, path, err) + return + } + + if err = parser(bytes.NewReader(b)); err != nil { + fmt.Println(err) + } +} + +//LoadAcronyms loads rules from io.Reader param +func LoadAcronyms(r io.Reader) error { + m := []string{} + err := json.NewDecoder(r).Decode(&m) + + if err != nil { + return fmt.Errorf("could not decode acronyms JSON from reader: %s", err) + } + + acronymsMoot.Lock() + defer acronymsMoot.Unlock() + + for _, acronym := range m { + baseAcronyms[acronym] = true + } + + return nil +} + +//LoadInflections loads rules from io.Reader param +func LoadInflections(r io.Reader) error { + m := map[string]string{} + + err := json.NewDecoder(r).Decode(&m) + if err != nil { + return fmt.Errorf("could not decode inflection JSON from reader: %s", err) + } + + pluralMoot.Lock() + defer pluralMoot.Unlock() + singularMoot.Lock() + defer singularMoot.Unlock() + + for s, p := range m { + singleToPlural[s] = p + pluralToSingle[p] = s + } + + return nil +} diff --git a/vendor/github.com/gobuffalo/flect/dasherize.go b/vendor/github.com/gobuffalo/flect/dasherize.go new file mode 100644 index 0000000000..c7a8a33e36 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/dasherize.go @@ -0,0 +1,34 @@ +package flect + +import ( + "strings" + "unicode" +) + +// Dasherize returns an alphanumeric, lowercased, dashed string +// Donald E. Knuth = donald-e-knuth +// Test with + sign = test-with-sign +// admin/WidgetID = admin-widget-id +func Dasherize(s string) string { + return New(s).Dasherize().String() +} + +// Dasherize returns an alphanumeric, lowercased, dashed string +// Donald E. Knuth = donald-e-knuth +// Test with + sign = test-with-sign +// admin/WidgetID = admin-widget-id +func (i Ident) Dasherize() Ident { + var parts []string + + for _, part := range i.Parts { + var x string + for _, c := range part { + if unicode.IsLetter(c) || unicode.IsDigit(c) { + x += string(c) + } + } + parts = xappend(parts, x) + } + + return New(strings.ToLower(strings.Join(parts, "-"))) +} diff --git a/vendor/github.com/gobuffalo/flect/flect.go b/vendor/github.com/gobuffalo/flect/flect.go new file mode 100644 index 0000000000..ee81b6f2bc --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/flect.go @@ -0,0 +1,43 @@ +/* +Package flect is a new inflection engine to replace [https://github.com/markbates/inflect](https://github.com/markbates/inflect) designed to be more modular, more readable, and easier to fix issues on than the original. 
+*/ +package flect + +import ( + "strings" + "unicode" +) + +var spaces = []rune{'_', ' ', ':', '-', '/'} + +func isSpace(c rune) bool { + for _, r := range spaces { + if r == c { + return true + } + } + return unicode.IsSpace(c) +} + +func xappend(a []string, ss ...string) []string { + for _, s := range ss { + s = strings.TrimSpace(s) + for _, x := range spaces { + s = strings.Trim(s, string(x)) + } + if _, ok := baseAcronyms[strings.ToUpper(s)]; ok { + s = strings.ToUpper(s) + } + if s != "" { + a = append(a, s) + } + } + return a +} + +func abs(x int) int { + if x < 0 { + return -x + } + return x +} diff --git a/vendor/github.com/gobuffalo/flect/go.mod b/vendor/github.com/gobuffalo/flect/go.mod new file mode 100644 index 0000000000..8501063a0e --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/go.mod @@ -0,0 +1,6 @@ +module github.com/gobuffalo/flect + +require ( + github.com/gobuffalo/envy v1.6.11 + github.com/stretchr/testify v1.2.2 +) diff --git a/vendor/github.com/gobuffalo/flect/go.sum b/vendor/github.com/gobuffalo/flect/go.sum new file mode 100644 index 0000000000..dec25e8087 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/go.sum @@ -0,0 +1,321 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY= +github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4= +github.com/gobuffalo/buffalo-plugins v1.0.2/go.mod h1:pOp/uF7X3IShFHyobahTkTLZaeUXwb0GrUTb9ngJWTs= +github.com/gobuffalo/buffalo-plugins v1.0.4/go.mod h1:pWS1vjtQ6uD17MVFWf7i3zfThrEKWlI5+PYLw/NaDB4= +github.com/gobuffalo/buffalo-plugins v1.4.3/go.mod h1:uCzTY0woez4nDMdQjkcOYKanngeUVRO2HZi7ezmAjWY= +github.com/gobuffalo/buffalo-plugins v1.5.1/go.mod h1:jbmwSZK5+PiAP9cC09VQOrGMZFCa/P0UMlIS3O12r5w= +github.com/gobuffalo/buffalo-plugins 
v1.6.4/go.mod h1:/+N1aophkA2jZ1ifB2O3Y9yGwu6gKOVMtUmJnbg+OZI= +github.com/gobuffalo/buffalo-plugins v1.6.5/go.mod h1:0HVkbgrVs/MnPZ/FOseDMVanCTm2RNcdM0PuXcL1NNI= +github.com/gobuffalo/buffalo-plugins v1.6.7/go.mod h1:ZGZRkzz2PiKWHs0z7QsPBOTo2EpcGRArMEym6ghKYgk= +github.com/gobuffalo/buffalo-plugins v1.6.9/go.mod h1:yYlYTrPdMCz+6/+UaXg5Jm4gN3xhsvsQ2ygVatZV5vw= +github.com/gobuffalo/buffalo-plugins v1.6.11/go.mod h1:eAA6xJIL8OuynJZ8amXjRmHND6YiusVAaJdHDN1Lu8Q= +github.com/gobuffalo/buffalo-plugins v1.8.2/go.mod h1:9te6/VjEQ7pKp7lXlDIMqzxgGpjlKoAcAANdCgoR960= +github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8= +github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc= +github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.7/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.8/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.9/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.10/go.mod h1:X0CFllQjTV5ogsnUrg+Oks2yTI+PU2dGYBJOEI2D1Uo= +github.com/gobuffalo/envy v1.6.11/go.mod h1:Fiq52W7nrHGDggFPhn2ZCcHw4u/rqXkqo+i7FB6EAcg= +github.com/gobuffalo/events v1.0.3/go.mod h1:Txo8WmqScapa7zimEQIwgiJBvMECMe9gJjsKNPN3uZw= +github.com/gobuffalo/events v1.0.7/go.mod h1:z8txf6H9jWhQ5Scr7YPLWg/cgXBRj8Q4uYI+rsVCCSQ= +github.com/gobuffalo/events v1.0.8/go.mod h1:A5KyqT1sA+3GJiBE4QKZibse9mtOcI9nw8gGrDdqYGs= +github.com/gobuffalo/events v1.1.3/go.mod h1:9yPGWYv11GENtzrIRApwQRMYSbUgCsZ1w6R503fCfrk= +github.com/gobuffalo/events v1.1.4/go.mod h1:09/YRRgZHEOts5Isov+g9X2xajxdvOAcUuAHIX/O//A= +github.com/gobuffalo/events v1.1.5/go.mod h1:3YUSzgHfYctSjEjLCWbkXP6djH2M+MLaVRzb4ymbAK0= +github.com/gobuffalo/events v1.1.7/go.mod h1:6fGqxH2ing5XMb3EYRq9LEkVlyPGs4oO/eLzh+S8CxY= +github.com/gobuffalo/events v1.1.8/go.mod h1:UFy+W6X6VbCWS8k2iT81HYX65dMtiuVycMy04cplt/8= +github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc= +github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181018182602-fd24a256709f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181019110701-3d6f0b585514/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181024204909-8f6be1a8c6c2/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181104133451-1f6e9779237a/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI= +github.com/gobuffalo/genny v0.0.0-20180924032338-7af3a40f2252/go.mod h1:tUTQOogrr7tAQnhajMSH6rv1BVev34H2sa1xNHMy94g= +github.com/gobuffalo/genny v0.0.0-20181003150629-3786a0744c5d/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= 
+github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= +github.com/gobuffalo/genny v0.0.0-20181007153042-b8de7d566757/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181012161047-33e5f43d83a6/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181017160347-90a774534246/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181024195656-51392254bf53/go.mod h1:o9GEH5gn5sCKLVB5rHFC4tq40rQ3VRUzmx6WwmaqISE= +github.com/gobuffalo/genny v0.0.0-20181025145300-af3f81d526b8/go.mod h1:uZ1fFYvdcP8mu0B/Ynarf6dsGvp7QFIpk/QACUuFUVI= +github.com/gobuffalo/genny v0.0.0-20181027191429-94d6cfb5c7fc/go.mod h1:x7SkrQQBx204Y+O9EwRXeszLJDTaWN0GnEasxgLrQTA= +github.com/gobuffalo/genny v0.0.0-20181027195209-3887b7171c4f/go.mod h1:JbKx8HSWICu5zyqWOa0dVV1pbbXOHusrSzQUprW6g+w= +github.com/gobuffalo/genny v0.0.0-20181106193839-7dcb0924caf1/go.mod h1:x61yHxvbDCgQ/7cOAbJCacZQuHgB0KMSzoYcw5debjU= +github.com/gobuffalo/genny v0.0.0-20181107223128-f18346459dbe/go.mod h1:utQD3aKKEsdb03oR+Vi/6ztQb1j7pO10N3OBoowRcSU= +github.com/gobuffalo/genny v0.0.0-20181114215459-0a4decd77f5d/go.mod h1:kN2KZ8VgXF9VIIOj/GM0Eo7YK+un4Q3tTreKOf0q1ng= +github.com/gobuffalo/genny v0.0.0-20181119162812-e8ff4adce8bb/go.mod h1:BA9htSe4bZwBDJLe8CUkoqkypq3hn3+CkoHqVOW718E= +github.com/gobuffalo/genny v0.0.0-20181127225641-2d959acc795b/go.mod h1:l54xLXNkteX/PdZ+HlgPk1qtcrgeOr3XUBBPDbH+7CQ= +github.com/gobuffalo/genny v0.0.0-20181128191930-77e34f71ba2a/go.mod h1:FW/D9p7cEEOqxYA71/hnrkOWm62JZ5ZNxcNIVJEaWBU= +github.com/gobuffalo/genny v0.0.0-20181203165245-fda8bcce96b1/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181203201232-849d2c9534ea/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181206121324-d6fb8a0dbe36/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181207164119-84844398a37d/go.mod h1:y0ysCHGGQf2T3vOhCrGHheYN54Y/REj0ayd0Suf4C/8= +github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt3/DSalaIXbb0De/dmTqMQdkQ4I= +github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY= +github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI= +github.com/gobuffalo/httptest v1.0.2/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= +github.com/gobuffalo/licenser v0.0.0-20180924033006-eae28e638a42/go.mod h1:Ubo90Np8gpsSZqNScZZkVXXAo5DGhTb+WYFIjlnog8w= +github.com/gobuffalo/licenser v0.0.0-20181025145548-437d89de4f75/go.mod h1:x3lEpYxkRG/XtGCUNkio+6RZ/dlOvLzTI9M1auIwFcw= +github.com/gobuffalo/licenser v0.0.0-20181027200154-58051a75da95/go.mod h1:BzhaaxGd1tq1+OLKObzgdCV9kqVhbTulxOpYbvMQWS0= +github.com/gobuffalo/licenser v0.0.0-20181109171355-91a2a7aac9a7/go.mod h1:m+Ygox92pi9bdg+gVaycvqE8RVSjZp7mWw75+K5NPHk= +github.com/gobuffalo/licenser v0.0.0-20181128165715-cc7305f8abed/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE= +github.com/gobuffalo/licenser v0.0.0-20181203160806-fe900bbede07/go.mod h1:ph6VDNvOzt1CdfaWC+9XwcBnlSTBz2j49PBwum6RFaU= +github.com/gobuffalo/logger v0.0.0-20181022175615-46cfb361fc27/go.mod h1:8sQkgyhWipz1mIctHF4jTxmJh1Vxhp7mP8IqbljgJZo= 
+github.com/gobuffalo/logger v0.0.0-20181027144941-73d08d2bb969/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181027193913-9cf4dd0efe46/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181109185836-3feeab578c17/go.mod h1:oNErH0xLe+utO+OW8ptXMSA5DkiSEDW1u3zGIt8F9Ew= +github.com/gobuffalo/logger v0.0.0-20181117211126-8e9b89b7c264/go.mod h1:5etB91IE0uBlw9k756fVKZJdS+7M7ejVhmpXXiSFj0I= +github.com/gobuffalo/logger v0.0.0-20181127160119-5b956e21995c/go.mod h1:+HxKANrR9VGw9yN3aOAppJKvhO05ctDi63w4mDnKv2U= +github.com/gobuffalo/makr v1.1.5/go.mod h1:Y+o0btAH1kYAMDJW/TX3+oAXEu0bmSLLoC9mIFxtzOw= +github.com/gobuffalo/mapi v1.0.0/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/meta v0.0.0-20181018155829-df62557efcd3/go.mod h1:XTTOhwMNryif3x9LkTTBO/Llrveezd71u3quLd0u7CM= +github.com/gobuffalo/meta v0.0.0-20181018192820-8c6cef77dab3/go.mod h1:E94EPzx9NERGCY69UWlcj6Hipf2uK/vnfrF4QD0plVE= +github.com/gobuffalo/meta v0.0.0-20181025145500-3a985a084b0a/go.mod h1:YDAKBud2FP7NZdruCSlmTmDOZbVSa6bpK7LJ/A/nlKg= +github.com/gobuffalo/meta v0.0.0-20181114191255-b130ebedd2f7/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE= +github.com/gobuffalo/meta v0.0.0-20181127070345-0d7e59dd540b/go.mod h1:RLO7tMvE0IAKAM8wny1aN12pvEKn7EtkBLkUZR00Qf8= +github.com/gobuffalo/mw-basicauth v1.0.3/go.mod h1:dg7+ilMZOKnQFHDefUzUHufNyTswVUviCBgF244C1+0= +github.com/gobuffalo/mw-contenttype v0.0.0-20180802152300-74f5a47f4d56/go.mod h1:7EvcmzBbeCvFtQm5GqF9ys6QnCxz2UM1x0moiWLq1No= +github.com/gobuffalo/mw-csrf v0.0.0-20180802151833-446ff26e108b/go.mod h1:sbGtb8DmDZuDUQoxjr8hG1ZbLtZboD9xsn6p77ppcHo= +github.com/gobuffalo/mw-forcessl v0.0.0-20180802152810-73921ae7a130/go.mod h1:JvNHRj7bYNAMUr/5XMkZaDcw3jZhUZpsmzhd//FFWmQ= +github.com/gobuffalo/mw-i18n v0.0.0-20180802152014-e3060b7e13d6/go.mod h1:91AQfukc52A6hdfIfkxzyr+kpVYDodgAeT5cjX1UIj4= +github.com/gobuffalo/mw-paramlogger v0.0.0-20181005191442-d6ee392ec72e/go.mod h1:6OJr6VwSzgJMqWMj7TYmRUqzNe2LXu/W1rRW4MAz/ME= +github.com/gobuffalo/mw-tokenauth v0.0.0-20181001105134-8545f626c189/go.mod h1:UqBF00IfKvd39ni5+yI5MLMjAf4gX7cDKN/26zDOD6c= +github.com/gobuffalo/packd v0.0.0-20181027182251-01ad393492c8/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027190505-aafc0d02c411/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027194105-7ae579e6d213/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181031195726-c82734870264/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181104210303-d376b15f8e96/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181124090624-311c6248e5fb/go.mod h1:Foenia9ZvITEvG05ab6XpiD5EfBHPL8A6hush8SJ0o8= +github.com/gobuffalo/packd v0.0.0-20181207120301-c49825f8f6f4/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= +github.com/gobuffalo/packr v1.13.7/go.mod 
h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24= +github.com/gobuffalo/packr v1.15.0/go.mod h1:t5gXzEhIviQwVlNx/+3SfS07GS+cZ2hn76WLzPp6MGI= +github.com/gobuffalo/packr v1.15.1/go.mod h1:IeqicJ7jm8182yrVmNbM6PR4g79SjN9tZLH8KduZZwE= +github.com/gobuffalo/packr v1.19.0/go.mod h1:MstrNkfCQhd5o+Ct4IJ0skWlxN8emOq8DsoT1G98VIU= +github.com/gobuffalo/packr v1.20.0/go.mod h1:JDytk1t2gP+my1ig7iI4NcVaXr886+N0ecUga6884zw= +github.com/gobuffalo/packr v1.21.0/go.mod h1:H00jGfj1qFKxscFJSw8wcL4hpQtPe1PfU2wa6sg/SR0= +github.com/gobuffalo/packr/v2 v2.0.0-rc.8/go.mod h1:y60QCdzwuMwO2R49fdQhsjCPv7tLQFR0ayzxxla9zes= +github.com/gobuffalo/packr/v2 v2.0.0-rc.10/go.mod h1:4CWWn4I5T3v4c1OsJ55HbHlUEKNWMITG5iIkdr4Px4w= +github.com/gobuffalo/packr/v2 v2.0.0-rc.11/go.mod h1:JoieH/3h3U4UmatmV93QmqyPUdf4wVM9HELaHEu+3fk= +github.com/gobuffalo/plush v3.7.16+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.20+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.21+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.22+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.23+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.30+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.32+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plushgen v0.0.0-20181128164830-d29dcb966cb2/go.mod h1:r9QwptTFnuvSaSRjpSp4S2/4e2D3tJhARYbvEBcKSb4= +github.com/gobuffalo/plushgen v0.0.0-20181203163832-9fc4964505c2/go.mod h1:opEdT33AA2HdrIwK1aibqnTJDVVKXC02Bar/GT1YRVs= +github.com/gobuffalo/plushgen v0.0.0-20181207152837-eedb135bd51b/go.mod h1:Lcw7HQbEVm09sAQrCLzIxuhFbB3nAgp4c55E+UlynR0= +github.com/gobuffalo/pop v4.8.2+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/pop v4.8.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/pop v4.8.4+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/release v1.0.35/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= +github.com/gobuffalo/release v1.0.38/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= +github.com/gobuffalo/release v1.0.42/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= +github.com/gobuffalo/release v1.0.52/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= +github.com/gobuffalo/release v1.0.53/go.mod h1:FdF257nd8rqhNaqtDWFGhxdJ/Ig4J7VcS3KL7n/a+aA= +github.com/gobuffalo/release v1.0.54/go.mod h1:Pe5/RxRa/BE8whDpGfRqSI7D1a0evGK1T4JDm339tJc= +github.com/gobuffalo/release v1.0.61/go.mod h1:mfIO38ujUNVDlBziIYqXquYfBF+8FDHUjKZgYC1Hj24= +github.com/gobuffalo/release v1.0.72/go.mod h1:NP5NXgg/IX3M5XmHmWR99D687/3Dt9qZtTK/Lbwc1hU= +github.com/gobuffalo/release v1.1.1/go.mod h1:Sluak1Xd6kcp6snkluR1jeXAogdJZpFFRzTYRs/2uwg= +github.com/gobuffalo/shoulders v1.0.1/go.mod h1:V33CcVmaQ4gRUmHKwq1fiTXuf8Gp/qjQBUL5tHPmvbA= +github.com/gobuffalo/syncx v0.0.0-20181120191700-98333ab04150/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobuffalo/syncx v0.0.0-20181120194010-558ac7de985f/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= 
+github.com/gobuffalo/tags v2.0.11+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/tags v2.0.14+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/uuid v2.0.3+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/uuid v2.0.4+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/uuid v2.0.5+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/validate v2.0.3+incompatible/go.mod h1:N+EtDe0J8252BgfzQUChBgfd6L93m9weay53EWFVsMM= +github.com/gobuffalo/x v0.0.0-20181003152136-452098b06085/go.mod h1:WevpGD+5YOreDJznWevcn8NTmQEW5STSBgIkpkjzqXc= +github.com/gobuffalo/x v0.0.0-20181007152206-913e47c59ca7/go.mod h1:9rDPXaB3kXdKWzMc4odGQQdG2e2DIEmANy5aSJ9yesY= +github.com/gofrs/uuid v3.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.1.2/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= +github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= +github.com/joho/godotenv v1.2.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq 
v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/markbates/deplist v1.0.4/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= +github.com/markbates/deplist v1.0.5/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= +github.com/markbates/going v1.0.2/go.mod h1:UWCk3zm0UKefHZ7l8BNqi26UyiEMniznk8naLdTcy6c= +github.com/markbates/grift v1.0.4/go.mod h1:wbmtW74veyx+cgfwFhlnnMWqhoz55rnHR47oMXzsyVs= +github.com/markbates/hmax v1.0.0/go.mod h1:cOkR9dktiESxIMu+65oc/r/bdY4bE8zZw3OLhLx0X2c= +github.com/markbates/inflect v1.0.0/go.mod h1:oTeZL2KHA7CUX6X+fovmK9OvIOFuqu0TwdQrZjLTh88= +github.com/markbates/inflect v1.0.1/go.mod h1:uv3UVNBe5qBIfCm8O8Q+DW+S1EopeyINj+Ikhc7rnCk= +github.com/markbates/inflect v1.0.3/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= +github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= +github.com/markbates/oncer v0.0.0-20180924031910-e862a676800b/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20180924034138-723ad0170a46/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/refresh v1.4.10/go.mod h1:NDPHvotuZmTmesXxr95C9bjlw1/0frJwtME2dzcVKhc= +github.com/markbates/safe v1.0.0/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/markbates/sigtx v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc= +github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/monoculum/formam v0.0.0-20180901015400-4e68be1d79ba/go.mod h1:RKgILGEJq24YyJ2ban8EO0RUVSJlF1pGsEvoLEACr/Q= +github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.0.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20170515013102-78fb10f4a5f8/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/octicon v0.0.0-20180602230221-c42b0e3b24d9/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.1.0/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= +github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.2.1/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181024171144-74cb1d3d52f4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181017193950-04a2e542c03f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180921163948-d47a0f339242/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180927150500-dad3d9fb7b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181005133103-4497e2df6f9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181011152604-fa43e7bc11ba/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181022134430-8a28ead16f52/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181024145615-5cd93ef61a7c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026064943-731415f00dce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181003024731-2f84ea8ef872/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181013182035-5e66757b835f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181017214349-06f26fdaaa28/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181024171208-a2dc47679d30/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181026183834-f60e5f99f081/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181107215632-34b416bd17b3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181114190951-94339b83286c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181119130350-139d099f6620/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127195227-b4e97c0ed882/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181205224935-3576414c54a4/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181206194817-bcd4e47d0288/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod 
h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= +gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/gobuffalo/flect/ident.go b/vendor/github.com/gobuffalo/flect/ident.go new file mode 100644 index 0000000000..8c58455739 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/ident.go @@ -0,0 +1,109 @@ +package flect + +import ( + "encoding" + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +// Ident represents the string and it's parts +type Ident struct { + Original string + Parts []string +} + +// String implements fmt.Stringer and returns the original string +func (i Ident) String() string { + return i.Original +} + +// New creates a new Ident from the string +func New(s string) Ident { + i := Ident{ + Original: s, + Parts: toParts(s), + } + + return i +} + +var splitRx = regexp.MustCompile("[^\\p{L}]") + +func toParts(s string) []string { + parts := []string{} + s = strings.TrimSpace(s) + if len(s) == 0 { + return parts + } + if _, ok := baseAcronyms[strings.ToUpper(s)]; ok { + return []string{strings.ToUpper(s)} + } + var prev rune + var x string + for _, c := range s { + cs := string(c) + // fmt.Println("### cs ->", cs) + // fmt.Println("### unicode.IsControl(c) ->", unicode.IsControl(c)) + // fmt.Println("### unicode.IsDigit(c) ->", unicode.IsDigit(c)) + // fmt.Println("### unicode.IsGraphic(c) ->", unicode.IsGraphic(c)) + // fmt.Println("### unicode.IsLetter(c) ->", unicode.IsLetter(c)) + // fmt.Println("### unicode.IsLower(c) ->", unicode.IsLower(c)) + // fmt.Println("### unicode.IsMark(c) ->", unicode.IsMark(c)) + // fmt.Println("### unicode.IsPrint(c) ->", unicode.IsPrint(c)) + // fmt.Println("### unicode.IsPunct(c) ->", unicode.IsPunct(c)) + // fmt.Println("### unicode.IsSpace(c) ->", unicode.IsSpace(c)) + // fmt.Println("### unicode.IsTitle(c) ->", unicode.IsTitle(c)) + // fmt.Println("### unicode.IsUpper(c) ->", unicode.IsUpper(c)) + if !utf8.ValidRune(c) { + continue + } + + if isSpace(c) { + parts = xappend(parts, x) + x = cs + prev = c + continue + } + + if unicode.IsUpper(c) && !unicode.IsUpper(prev) { + parts = xappend(parts, x) + x = cs + prev = c + continue + } + if unicode.IsUpper(c) && baseAcronyms[strings.ToUpper(x)] { + parts = xappend(parts, x) + x = cs + prev = c + continue + } + if unicode.IsLetter(c) || unicode.IsDigit(c) || unicode.IsPunct(c) || c == '`' { + prev = c + x += cs + continue + } + + parts = xappend(parts, x) + x = "" + prev = c + } + parts = xappend(parts, x) + + return parts +} + +var _ encoding.TextUnmarshaler = &Ident{} +var _ encoding.TextMarshaler = &Ident{} + +//UnmarshalText unmarshalls byte array into the Ident +func (i *Ident) UnmarshalText(data []byte) error { + (*i) = New(string(data)) + return nil +} + +//MarshalText marshals Ident into byte array +func (i Ident) MarshalText() ([]byte, error) { + return []byte(i.Original), nil +} diff --git a/vendor/github.com/gobuffalo/flect/lower_upper.go b/vendor/github.com/gobuffalo/flect/lower_upper.go new file mode 100644 index 0000000000..930da58d8c --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/lower_upper.go @@ -0,0 +1,13 @@ +package flect + +import "strings" + +// ToUpper is a convience wrapper for 
strings.ToUpper +func (i Ident) ToUpper() Ident { + return New(strings.ToUpper(i.Original)) +} + +// ToLower is a convience wrapper for strings.ToLower +func (i Ident) ToLower() Ident { + return New(strings.ToLower(i.Original)) +} diff --git a/vendor/github.com/gobuffalo/flect/ordinalize.go b/vendor/github.com/gobuffalo/flect/ordinalize.go new file mode 100644 index 0000000000..1ce27b3a0d --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/ordinalize.go @@ -0,0 +1,43 @@ +package flect + +import ( + "fmt" + "strconv" +) + +// Ordinalize converts a number to an ordinal version +// 42 = 42nd +// 45 = 45th +// 1 = 1st +func Ordinalize(s string) string { + return New(s).Ordinalize().String() +} + +// Ordinalize converts a number to an ordinal version +// 42 = 42nd +// 45 = 45th +// 1 = 1st +func (i Ident) Ordinalize() Ident { + number, err := strconv.Atoi(i.Original) + if err != nil { + return i + } + var s string + switch abs(number) % 100 { + case 11, 12, 13: + s = fmt.Sprintf("%dth", number) + default: + switch abs(number) % 10 { + case 1: + s = fmt.Sprintf("%dst", number) + case 2: + s = fmt.Sprintf("%dnd", number) + case 3: + s = fmt.Sprintf("%drd", number) + } + } + if s != "" { + return New(s) + } + return New(fmt.Sprintf("%dth", number)) +} diff --git a/vendor/github.com/gobuffalo/flect/pascalize.go b/vendor/github.com/gobuffalo/flect/pascalize.go new file mode 100644 index 0000000000..76f0c6a706 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/pascalize.go @@ -0,0 +1,25 @@ +package flect + +import ( + "unicode" +) + +// Pascalize returns a string with each segment capitalized +// user = User +// bob dylan = BobDylan +// widget_id = WidgetID +func Pascalize(s string) string { + return New(s).Pascalize().String() +} + +// Pascalize returns a string with each segment capitalized +// user = User +// bob dylan = BobDylan +// widget_id = WidgetID +func (i Ident) Pascalize() Ident { + c := i.Camelize() + if len(c.String()) == 0 { + return c + } + return New(string(unicode.ToUpper(rune(c.Original[0]))) + c.Original[1:]) +} diff --git a/vendor/github.com/gobuffalo/flect/plural_rules.go b/vendor/github.com/gobuffalo/flect/plural_rules.go new file mode 100644 index 0000000000..1ebee22ab6 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/plural_rules.go @@ -0,0 +1,240 @@ +package flect + +var pluralRules = []rule{} + +// AddPlural adds a rule that will replace the given suffix with the replacement suffix. 
+func AddPlural(suffix string, repl string) { + pluralMoot.Lock() + defer pluralMoot.Unlock() + pluralRules = append(pluralRules, rule{ + suffix: suffix, + fn: func(s string) string { + s = s[:len(s)-len(suffix)] + return s + repl + }, + }) + + pluralRules = append(pluralRules, rule{ + suffix: repl, + fn: noop, + }) +} + +var singleToPlural = map[string]string{ + "human": "humans", + "matrix": "matrices", + "vertix": "vertices", + "index": "indices", + "mouse": "mice", + "louse": "lice", + "ress": "resses", + "ox": "oxen", + "quiz": "quizzes", + "series": "series", + "octopus": "octopi", + "equipment": "equipment", + "information": "information", + "rice": "rice", + "money": "money", + "species": "species", + "fish": "fish", + "sheep": "sheep", + "jeans": "jeans", + "police": "police", + "dear": "dear", + "goose": "geese", + "tooth": "teeth", + "foot": "feet", + "bus": "buses", + "fez": "fezzes", + "piano": "pianos", + "halo": "halos", + "photo": "photos", + "aircraft": "aircraft", + "alumna": "alumnae", + "alumnus": "alumni", + "analysis": "analyses", + "antenna": "antennas", + "antithesis": "antitheses", + "apex": "apexes", + "appendix": "appendices", + "axis": "axes", + "bacillus": "bacilli", + "bacterium": "bacteria", + "basis": "bases", + "beau": "beaus", + "bison": "bison", + "bureau": "bureaus", + "campus": "campuses", + "château": "châteaux", + "codex": "codices", + "concerto": "concertos", + "corpus": "corpora", + "crisis": "crises", + "curriculum": "curriculums", + "deer": "deer", + "diagnosis": "diagnoses", + "die": "dice", + "dwarf": "dwarves", + "ellipsis": "ellipses", + "erratum": "errata", + "faux pas": "faux pas", + "focus": "foci", + "formula": "formulas", + "fungus": "fungi", + "genus": "genera", + "graffito": "graffiti", + "grouse": "grouse", + "half": "halves", + "hoof": "hooves", + "hypothesis": "hypotheses", + "larva": "larvae", + "libretto": "librettos", + "loaf": "loaves", + "locus": "loci", + "minutia": "minutiae", + "moose": "moose", + "nebula": "nebulae", + "nucleus": "nuclei", + "oasis": "oases", + "offspring": "offspring", + "opus": "opera", + "parenthesis": "parentheses", + "phenomenon": "phenomena", + "phylum": "phyla", + "prognosis": "prognoses", + "radius": "radiuses", + "referendum": "referendums", + "salmon": "salmon", + "shrimp": "shrimp", + "stimulus": "stimuli", + "stratum": "strata", + "swine": "swine", + "syllabus": "syllabi", + "symposium": "symposiums", + "synopsis": "synopses", + "tableau": "tableaus", + "thesis": "theses", + "thief": "thieves", + "trout": "trout", + "tuna": "tuna", + "vertebra": "vertebrae", + "vita": "vitae", + "vortex": "vortices", + "wharf": "wharves", + "wife": "wives", + "wolf": "wolves", + "datum": "data", + "testis": "testes", + "alias": "aliases", + "house": "houses", + "shoe": "shoes", + "news": "news", + "ovum": "ova", + "foo": "foos", +} + +var pluralToSingle = map[string]string{} + +func init() { + for k, v := range singleToPlural { + pluralToSingle[v] = k + } +} +func init() { + AddPlural("campus", "campuses") + AddPlural("man", "men") + AddPlural("tz", "tzes") + AddPlural("alias", "aliases") + AddPlural("oasis", "oasis") + AddPlural("wife", "wives") + AddPlural("basis", "basis") + AddPlural("atum", "ata") + AddPlural("adium", "adia") + AddPlural("actus", "acti") + AddPlural("irus", "iri") + AddPlural("iterion", "iteria") + AddPlural("dium", "diums") + AddPlural("ovum", "ova") + AddPlural("ize", "izes") + AddPlural("dge", "dges") + AddPlural("focus", "foci") + AddPlural("child", "children") + AddPlural("oaf", 
"oaves") + AddPlural("randum", "randa") + AddPlural("base", "bases") + AddPlural("atus", "atuses") + AddPlural("ode", "odes") + AddPlural("person", "people") + AddPlural("va", "vae") + AddPlural("leus", "li") + AddPlural("oot", "eet") + AddPlural("oose", "eese") + AddPlural("box", "boxes") + AddPlural("ium", "ia") + AddPlural("sis", "ses") + AddPlural("nna", "nnas") + AddPlural("eses", "esis") + AddPlural("stis", "stes") + AddPlural("ex", "ices") + AddPlural("ula", "ulae") + AddPlural("isis", "ises") + AddPlural("ouses", "ouse") + AddPlural("olves", "olf") + AddPlural("lf", "lves") + AddPlural("rf", "rves") + AddPlural("afe", "aves") + AddPlural("bfe", "bves") + AddPlural("cfe", "cves") + AddPlural("dfe", "dves") + AddPlural("efe", "eves") + AddPlural("gfe", "gves") + AddPlural("hfe", "hves") + AddPlural("ife", "ives") + AddPlural("jfe", "jves") + AddPlural("kfe", "kves") + AddPlural("lfe", "lves") + AddPlural("mfe", "mves") + AddPlural("nfe", "nves") + AddPlural("ofe", "oves") + AddPlural("pfe", "pves") + AddPlural("qfe", "qves") + AddPlural("rfe", "rves") + AddPlural("sfe", "sves") + AddPlural("tfe", "tves") + AddPlural("ufe", "uves") + AddPlural("vfe", "vves") + AddPlural("wfe", "wves") + AddPlural("xfe", "xves") + AddPlural("yfe", "yves") + AddPlural("zfe", "zves") + AddPlural("hive", "hives") + AddPlural("quy", "quies") + AddPlural("by", "bies") + AddPlural("cy", "cies") + AddPlural("dy", "dies") + AddPlural("fy", "fies") + AddPlural("gy", "gies") + AddPlural("hy", "hies") + AddPlural("jy", "jies") + AddPlural("ky", "kies") + AddPlural("ly", "lies") + AddPlural("my", "mies") + AddPlural("ny", "nies") + AddPlural("py", "pies") + AddPlural("qy", "qies") + AddPlural("ry", "ries") + AddPlural("sy", "sies") + AddPlural("ty", "ties") + AddPlural("vy", "vies") + AddPlural("wy", "wies") + AddPlural("xy", "xies") + AddPlural("zy", "zies") + AddPlural("x", "xes") + AddPlural("ch", "ches") + AddPlural("ss", "sses") + AddPlural("sh", "shes") + AddPlural("oe", "oes") + AddPlural("io", "ios") + AddPlural("o", "oes") +} diff --git a/vendor/github.com/gobuffalo/flect/pluralize.go b/vendor/github.com/gobuffalo/flect/pluralize.go new file mode 100644 index 0000000000..1b9d43e462 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/pluralize.go @@ -0,0 +1,49 @@ +package flect + +import ( + "strings" + "sync" +) + +var pluralMoot = &sync.RWMutex{} + +// Pluralize returns a plural version of the string +// user = users +// person = people +// datum = data +func Pluralize(s string) string { + return New(s).Pluralize().String() +} + +// Pluralize returns a plural version of the string +// user = users +// person = people +// datum = data +func (i Ident) Pluralize() Ident { + s := i.Original + if len(s) == 0 { + return New("") + } + + pluralMoot.RLock() + defer pluralMoot.RUnlock() + + ls := strings.ToLower(s) + if _, ok := pluralToSingle[ls]; ok { + return i + } + if p, ok := singleToPlural[ls]; ok { + return New(p) + } + for _, r := range pluralRules { + if strings.HasSuffix(ls, r.suffix) { + return New(r.fn(s)) + } + } + + if strings.HasSuffix(ls, "s") { + return i + } + + return New(i.String() + "s") +} diff --git a/vendor/github.com/gobuffalo/flect/rule.go b/vendor/github.com/gobuffalo/flect/rule.go new file mode 100644 index 0000000000..dc616b337d --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/rule.go @@ -0,0 +1,10 @@ +package flect + +type ruleFn func(string) string + +type rule struct { + suffix string + fn ruleFn +} + +func noop(s string) string { return s } 
diff --git a/vendor/github.com/gobuffalo/flect/singular_rules.go b/vendor/github.com/gobuffalo/flect/singular_rules.go new file mode 100644 index 0000000000..14b471cd00 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/singular_rules.go @@ -0,0 +1,122 @@ +package flect + +var singularRules = []rule{} + +// AddSingular adds a rule that will replace the given suffix with the replacement suffix. +func AddSingular(ext string, repl string) { + singularMoot.Lock() + defer singularMoot.Unlock() + singularRules = append(singularRules, rule{ + suffix: ext, + fn: func(s string) string { + s = s[:len(s)-len(ext)] + return s + repl + }, + }) + + singularRules = append(singularRules, rule{ + suffix: repl, + fn: func(s string) string { + return s + }, + }) +} + +func init() { + AddSingular("ria", "rion") + AddSingular("news", "news") + AddSingular("halves", "half") + AddSingular("appendix", "appendix") + AddSingular("zzes", "zz") + AddSingular("ulas", "ula") + AddSingular("psis", "pse") + AddSingular("genus", "genera") + AddSingular("phyla", "phylum") + AddSingular("odice", "odex") + AddSingular("oxen", "ox") + AddSingular("ianos", "iano") + AddSingular("ulus", "uli") + AddSingular("mice", "mouse") + AddSingular("ouses", "ouse") + AddSingular("mni", "mnus") + AddSingular("ocus", "oci") + AddSingular("shoes", "shoe") + AddSingular("oasis", "oasis") + AddSingular("lice", "louse") + AddSingular("men", "man") + AddSingular("ta", "tum") + AddSingular("ia", "ium") + AddSingular("tives", "tive") + AddSingular("ldren", "ld") + AddSingular("people", "person") + AddSingular("aves", "afe") + AddSingular("uses", "us") + AddSingular("bves", "bfe") + AddSingular("cves", "cfe") + AddSingular("dves", "dfe") + AddSingular("eves", "efe") + AddSingular("gves", "gfe") + AddSingular("hves", "hfe") + AddSingular("chives", "chive") + AddSingular("ives", "ife") + AddSingular("movies", "movie") + AddSingular("jeans", "jeans") + AddSingular("cesses", "cess") + AddSingular("cess", "cess") + AddSingular("acti", "actus") + AddSingular("itzes", "itz") + AddSingular("usses", "uss") + AddSingular("uss", "uss") + AddSingular("jves", "jfe") + AddSingular("kves", "kfe") + AddSingular("mves", "mfe") + AddSingular("nves", "nfe") + AddSingular("moves", "move") + AddSingular("oves", "ofe") + AddSingular("pves", "pfe") + AddSingular("qves", "qfe") + AddSingular("sves", "sfe") + AddSingular("tves", "tfe") + AddSingular("uves", "ufe") + AddSingular("vves", "vfe") + AddSingular("wves", "wfe") + AddSingular("xves", "xfe") + AddSingular("yves", "yfe") + AddSingular("zves", "zfe") + AddSingular("hives", "hive") + AddSingular("lves", "lf") + AddSingular("rves", "rf") + AddSingular("quies", "quy") + AddSingular("bies", "by") + AddSingular("cies", "cy") + AddSingular("dies", "dy") + AddSingular("fies", "fy") + AddSingular("gies", "gy") + AddSingular("hies", "hy") + AddSingular("jies", "jy") + AddSingular("kies", "ky") + AddSingular("lies", "ly") + AddSingular("mies", "my") + AddSingular("nies", "ny") + AddSingular("pies", "py") + AddSingular("qies", "qy") + AddSingular("ries", "ry") + AddSingular("sies", "sy") + AddSingular("ties", "ty") + AddSingular("vies", "vy") + AddSingular("wies", "wy") + AddSingular("xies", "xy") + AddSingular("zies", "zy") + AddSingular("xes", "x") + AddSingular("ches", "ch") + AddSingular("sses", "ss") + AddSingular("shes", "sh") + AddSingular("oes", "o") + AddSingular("ress", "ress") + AddSingular("iri", "irus") + AddSingular("irus", "irus") + AddSingular("tuses", "tus") + AddSingular("tus", "tus") + 
AddSingular("s", "") + AddSingular("ss", "ss") +} diff --git a/vendor/github.com/gobuffalo/flect/singularize.go b/vendor/github.com/gobuffalo/flect/singularize.go new file mode 100644 index 0000000000..a08cbd5862 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/singularize.go @@ -0,0 +1,44 @@ +package flect + +import ( + "strings" + "sync" +) + +var singularMoot = &sync.RWMutex{} + +// Singularize returns a singular version of the string +// users = user +// data = datum +// people = person +func Singularize(s string) string { + return New(s).Singularize().String() +} + +// Singularize returns a singular version of the string +// users = user +// data = datum +// people = person +func (i Ident) Singularize() Ident { + s := i.Original + if len(s) == 0 { + return i + } + + singularMoot.RLock() + defer singularMoot.RUnlock() + ls := strings.ToLower(s) + if p, ok := pluralToSingle[ls]; ok { + return New(p) + } + if _, ok := singleToPlural[ls]; ok { + return i + } + for _, r := range singularRules { + if strings.HasSuffix(ls, r.suffix) { + return New(r.fn(s)) + } + } + + return i +} diff --git a/vendor/github.com/gobuffalo/flect/titleize.go b/vendor/github.com/gobuffalo/flect/titleize.go new file mode 100644 index 0000000000..e7a20d37f2 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/titleize.go @@ -0,0 +1,31 @@ +package flect + +import ( + "strings" + "unicode" +) + +// Titleize will capitalize the start of each part +// "Nice to see you!" = "Nice To See You!" +// "i've read a book! have you?" = "I've Read A Book! Have You?" +// "This is `code` ok" = "This Is `code` OK" +func Titleize(s string) string { + return New(s).Titleize().String() +} + +// Titleize will capitalize the start of each part +// "Nice to see you!" = "Nice To See You!" +// "i've read a book! have you?" = "I've Read A Book! Have You?" +// "This is `code` ok" = "This Is `code` OK" +func (i Ident) Titleize() Ident { + var parts []string + for _, part := range i.Parts { + var x string + x = string(unicode.ToTitle(rune(part[0]))) + if len(part) > 1 { + x += part[1:] + } + parts = append(parts, x) + } + return New(strings.Join(parts, " ")) +} diff --git a/vendor/github.com/gobuffalo/flect/underscore.go b/vendor/github.com/gobuffalo/flect/underscore.go new file mode 100644 index 0000000000..b92488aa00 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/underscore.go @@ -0,0 +1,34 @@ +package flect + +import ( + "strings" + "unicode" +) + +// Underscore a string +// bob dylan = bob_dylan +// Nice to see you! = nice_to_see_you +// widgetID = widget_id +func Underscore(s string) string { + return New(s).Underscore().String() +} + +// Underscore a string +// bob dylan = bob_dylan +// Nice to see you! 
= nice_to_see_you +// widgetID = widget_id +func (i Ident) Underscore() Ident { + var out []string + for _, part := range i.Parts { + var x string + for _, c := range part { + if unicode.IsLetter(c) || unicode.IsDigit(c) { + x += string(c) + } + } + if x != "" { + out = append(out, x) + } + } + return New(strings.ToLower(strings.Join(out, "_"))) +} diff --git a/vendor/github.com/gobuffalo/flect/version.go b/vendor/github.com/gobuffalo/flect/version.go new file mode 100644 index 0000000000..b5f62170dc --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/version.go @@ -0,0 +1,4 @@ +package flect + +//Version holds Flect version number +const Version = "v0.0.1" diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go index 63b0f08bef..d9aa3c42d6 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -186,6 +186,7 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } + // x -= 0x80 << 63 // Always zero. return 0, errOverflow diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c09a..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. 
-func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go index 686bd2a09d..44ebd457cf 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -544,7 +544,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + return errors.New("proto: bad extension value type") } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go index d17f802092..b2271d0b7b 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -341,6 +341,26 @@ type Message interface { ProtoMessage() } +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go index f48a756761..3b6ca41d5e 100644 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -36,7 +36,13 @@ package proto */ import ( + "bytes" + "encoding/json" "errors" + "fmt" + "reflect" + "sort" + "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -139,9 +145,46 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. 
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + + case map[int32]Extension: + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, + } + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + + default: + return nil, errors.New("proto: not an extension map") + } +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { +func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -179,3 +222,93 @@ func unmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. 
+ +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go index c9e5fa0207..04dcb8d9ef 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -391,6 +391,9 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { + if collectStats { + stats.Chit++ + } return sprop } @@ -403,8 +406,14 @@ func GetProperties(t reflect.Type) *StructProperties { // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } return prop } + if collectStats { + stats.Cmiss++ + } prop := new(StructProperties) // in case of recursive protos, fill this in now. diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go index 9b1538d055..ba58c49a43 100644 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -491,7 +491,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. fi.isPointer = true fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go index bb2622f28c..e6b15c76ca 100644 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -138,7 +138,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { u.computeUnmarshalInfo() } if u.isMessageSet { - return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } var reqMask uint64 // bitmask of required fields we've seen. var errLater error @@ -2142,7 +2142,7 @@ func encodeVarint(b []byte, x uint64) []byte { // If there is an error, it returns 0,0. func decodeVarint(b []byte) (uint64, int) { var x, y uint64 - if len(b) == 0 { + if len(b) <= 0 { goto bad } x = uint64(b[0]) diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go index eac1c7664f..532cc45e6d 100644 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -25,7 +25,7 @@ type Cache struct { // an item is evicted. Zero means no limit. 
MaxEntries int - // OnEvicted optionally specifies a callback function to be + // OnEvicted optionally specificies a callback function to be // executed when an entry is purged from the cache. OnEvicted func(key Key, value interface{}) diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go deleted file mode 100644 index ada2b78e89..0000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ /dev/null @@ -1,1271 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. -It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. - -This package produces a different output than the standard "encoding/json" package, -which does not operate correctly on protocol buffers. -*/ -package jsonpb - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - - stpb "github.com/golang/protobuf/ptypes/struct" -) - -const secondInNanos = int64(time.Second / time.Nanosecond) - -// Marshaler is a configurable object for converting between -// protocol buffer objects and a JSON representation for them. -type Marshaler struct { - // Whether to render enum values as integers, as opposed to string values. - EnumsAsInts bool - - // Whether to render fields with zero values. - EmitDefaults bool - - // A string to indent each level by. The presence of this field will - // also cause a space to appear between the field separator and - // value, and for newlines to be appear between fields and array - // elements. - Indent string - - // Whether to use the original (.proto) name for fields. 
- OrigName bool - - // A custom URL resolver to use when marshaling Any messages to JSON. - // If unset, the default resolution strategy is to extract the - // fully-qualified type name from the type URL and pass that to - // proto.MessageType(string). - AnyResolver AnyResolver -} - -// AnyResolver takes a type URL, present in an Any message, and resolves it into -// an instance of the associated message. -type AnyResolver interface { - Resolve(typeUrl string) (proto.Message, error) -} - -func defaultResolveAny(typeUrl string) (proto.Message, error) { - // Only the part of typeUrl after the last slash is relevant. - mname := typeUrl - if slash := strings.LastIndex(mname, "/"); slash >= 0 { - mname = mname[slash+1:] - } - mt := proto.MessageType(mname) - if mt == nil { - return nil, fmt.Errorf("unknown message type %q", mname) - } - return reflect.New(mt.Elem()).Interface().(proto.Message), nil -} - -// JSONPBMarshaler is implemented by protobuf messages that customize the -// way they are marshaled to JSON. Messages that implement this should -// also implement JSONPBUnmarshaler so that the custom format can be -// parsed. -// -// The JSON marshaling must follow the proto to JSON specification: -// https://developers.google.com/protocol-buffers/docs/proto3#json -type JSONPBMarshaler interface { - MarshalJSONPB(*Marshaler) ([]byte, error) -} - -// JSONPBUnmarshaler is implemented by protobuf messages that customize -// the way they are unmarshaled from JSON. Messages that implement this -// should also implement JSONPBMarshaler so that the custom format can be -// produced. -// -// The JSON unmarshaling must follow the JSON to proto specification: -// https://developers.google.com/protocol-buffers/docs/proto3#json -type JSONPBUnmarshaler interface { - UnmarshalJSONPB(*Unmarshaler, []byte) error -} - -// Marshal marshals a protocol buffer into JSON. -func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { - v := reflect.ValueOf(pb) - if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { - return errors.New("Marshal called with nil") - } - // Check for unset required fields first. - if err := checkRequiredFields(pb); err != nil { - return err - } - writer := &errWriter{writer: out} - return m.marshalObject(writer, pb, "", "") -} - -// MarshalToString converts a protocol buffer object to JSON string. -func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { - var buf bytes.Buffer - if err := m.Marshal(&buf, pb); err != nil { - return "", err - } - return buf.String(), nil -} - -type int32Slice []int32 - -var nonFinite = map[string]float64{ - `"NaN"`: math.NaN(), - `"Infinity"`: math.Inf(1), - `"-Infinity"`: math.Inf(-1), -} - -// For sorting extensions ids to ensure stable output. -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type wkt interface { - XXX_WellKnownType() string -} - -// marshalObject writes a struct to the Writer. 
-func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { - if jsm, ok := v.(JSONPBMarshaler); ok { - b, err := jsm.MarshalJSONPB(m) - if err != nil { - return err - } - if typeURL != "" { - // we are marshaling this object to an Any type - var js map[string]*json.RawMessage - if err = json.Unmarshal(b, &js); err != nil { - return fmt.Errorf("type %T produced invalid JSON: %v", v, err) - } - turl, err := json.Marshal(typeURL) - if err != nil { - return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) - } - js["@type"] = (*json.RawMessage)(&turl) - if b, err = json.Marshal(js); err != nil { - return err - } - } - - out.write(string(b)) - return out.err - } - - s := reflect.ValueOf(v).Elem() - - // Handle well-known types. - if wkt, ok := v.(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - // "Wrappers use the same representation in JSON - // as the wrapped primitive type, ..." - sprop := proto.GetProperties(s.Type()) - return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) - case "Any": - // Any is a bit more involved. - return m.marshalAny(out, v, indent) - case "Duration": - // "Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision." - s, ns := s.Field(0).Int(), s.Field(1).Int() - if ns <= -secondInNanos || ns >= secondInNanos { - return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) - } - if (s > 0 && ns < 0) || (s < 0 && ns > 0) { - return errors.New("signs of seconds and nanos do not match") - } - if s < 0 { - ns = -ns - } - x := fmt.Sprintf("%d.%09d", s, ns) - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - out.write(`"`) - out.write(x) - out.write(`s"`) - return out.err - case "Struct", "ListValue": - // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. - // TODO: pass the correct Properties if needed. - return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) - case "Timestamp": - // "RFC 3339, where generated output will always be Z-normalized - // and uses 0, 3, 6 or 9 fractional digits." - s, ns := s.Field(0).Int(), s.Field(1).Int() - if ns < 0 || ns >= secondInNanos { - return fmt.Errorf("ns out of range [0, %v)", secondInNanos) - } - t := time.Unix(s, ns).UTC() - // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). - x := t.Format("2006-01-02T15:04:05.000000000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - out.write(`"`) - out.write(x) - out.write(`Z"`) - return out.err - case "Value": - // Value has a single oneof. - kind := s.Field(0) - if kind.IsNil() { - // "absence of any variant indicates an error" - return errors.New("nil Value") - } - // oneof -> *T -> T -> T.F - x := kind.Elem().Elem().Field(0) - // TODO: pass the correct Properties if needed. 
- return m.marshalValue(out, &proto.Properties{}, x, indent) - } - } - - out.write("{") - if m.Indent != "" { - out.write("\n") - } - - firstField := true - - if typeURL != "" { - if err := m.marshalTypeURL(out, indent, typeURL); err != nil { - return err - } - firstField = false - } - - for i := 0; i < s.NumField(); i++ { - value := s.Field(i) - valueField := s.Type().Field(i) - if strings.HasPrefix(valueField.Name, "XXX_") { - continue - } - - // IsNil will panic on most value kinds. - switch value.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface: - if value.IsNil() { - continue - } - } - - if !m.EmitDefaults { - switch value.Kind() { - case reflect.Bool: - if !value.Bool() { - continue - } - case reflect.Int32, reflect.Int64: - if value.Int() == 0 { - continue - } - case reflect.Uint32, reflect.Uint64: - if value.Uint() == 0 { - continue - } - case reflect.Float32, reflect.Float64: - if value.Float() == 0 { - continue - } - case reflect.String: - if value.Len() == 0 { - continue - } - case reflect.Map, reflect.Ptr, reflect.Slice: - if value.IsNil() { - continue - } - } - } - - // Oneof fields need special handling. - if valueField.Tag.Get("protobuf_oneof") != "" { - // value is an interface containing &T{real_value}. - sv := value.Elem().Elem() // interface -> *T -> T - value = sv.Field(0) - valueField = sv.Type().Field(0) - } - prop := jsonProperties(valueField, m.OrigName) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, prop, value, indent); err != nil { - return err - } - firstField = false - } - - // Handle proto2 extensions. - if ep, ok := v.(proto.Message); ok { - extensions := proto.RegisteredExtensions(v) - // Sort extensions for stable output. - ids := make([]int32, 0, len(extensions)) - for id, desc := range extensions { - if !proto.HasExtension(ep, desc) { - continue - } - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - for _, id := range ids { - desc := extensions[id] - if desc == nil { - // unknown extension - continue - } - ext, extErr := proto.GetExtension(ep, desc) - if extErr != nil { - return extErr - } - value := reflect.ValueOf(ext) - var prop proto.Properties - prop.Parse(desc.Tag) - prop.JSONName = fmt.Sprintf("[%s]", desc.Name) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, &prop, value, indent); err != nil { - return err - } - firstField = false - } - - } - - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err -} - -func (m *Marshaler) writeSep(out *errWriter) { - if m.Indent != "" { - out.write(",\n") - } else { - out.write(",") - } -} - -func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { - // "If the Any contains a value that has a special JSON mapping, - // it will be converted as follows: {"@type": xxx, "value": yyy}. - // Otherwise, the value will be converted into a JSON object, - // and the "@type" field will be inserted to indicate the actual data type." 
- v := reflect.ValueOf(any).Elem() - turl := v.Field(0).String() - val := v.Field(1).Bytes() - - var msg proto.Message - var err error - if m.AnyResolver != nil { - msg, err = m.AnyResolver.Resolve(turl) - } else { - msg, err = defaultResolveAny(turl) - } - if err != nil { - return err - } - - if err := proto.Unmarshal(val, msg); err != nil { - return err - } - - if _, ok := msg.(wkt); ok { - out.write("{") - if m.Indent != "" { - out.write("\n") - } - if err := m.marshalTypeURL(out, indent, turl); err != nil { - return err - } - m.writeSep(out) - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - out.write(`"value": `) - } else { - out.write(`"value":`) - } - if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { - return err - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err - } - - return m.marshalObject(out, msg, indent, turl) -} - -func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"@type":`) - if m.Indent != "" { - out.write(" ") - } - b, err := json.Marshal(typeURL) - if err != nil { - return err - } - out.write(string(b)) - return out.err -} - -// marshalField writes field description and value to the Writer. -func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"`) - out.write(prop.JSONName) - out.write(`":`) - if m.Indent != "" { - out.write(" ") - } - if err := m.marshalValue(out, prop, v, indent); err != nil { - return err - } - return nil -} - -// marshalValue writes the value to the Writer. -func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - var err error - v = reflect.Indirect(v) - - // Handle nil pointer - if v.Kind() == reflect.Invalid { - out.write("null") - return out.err - } - - // Handle repeated elements. - if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - out.write("[") - comma := "" - for i := 0; i < v.Len(); i++ { - sliceVal := v.Index(i) - out.write(comma) - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { - return err - } - comma = "," - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write("]") - return out.err - } - - // Handle well-known types. - // Most are handled up in marshalObject (because 99% are messages). - if wkt, ok := v.Interface().(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "NullValue": - out.write("null") - return out.err - } - } - - // Handle enumerations. - if !m.EnumsAsInts && prop.Enum != "" { - // Unknown enum values will are stringified by the proto library as their - // value. Such values should _not_ be quoted or they will be interpreted - // as an enum string instead of their value. - enumStr := v.Interface().(fmt.Stringer).String() - var valStr string - if v.Kind() == reflect.Ptr { - valStr = strconv.Itoa(int(v.Elem().Int())) - } else { - valStr = strconv.Itoa(int(v.Int())) - } - isKnownEnum := enumStr != valStr - if isKnownEnum { - out.write(`"`) - } - out.write(enumStr) - if isKnownEnum { - out.write(`"`) - } - return out.err - } - - // Handle nested messages. 
- if v.Kind() == reflect.Struct { - return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") - } - - // Handle maps. - // Since Go randomizes map iteration, we sort keys for stable output. - if v.Kind() == reflect.Map { - out.write(`{`) - keys := v.MapKeys() - sort.Sort(mapKeys(keys)) - for i, k := range keys { - if i > 0 { - out.write(`,`) - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - - // TODO handle map key prop properly - b, err := json.Marshal(k.Interface()) - if err != nil { - return err - } - s := string(b) - - // If the JSON is not a string value, encode it again to make it one. - if !strings.HasPrefix(s, `"`) { - b, err := json.Marshal(s) - if err != nil { - return err - } - s = string(b) - } - - out.write(s) - out.write(`:`) - if m.Indent != "" { - out.write(` `) - } - - vprop := prop - if prop != nil && prop.MapValProp != nil { - vprop = prop.MapValProp - } - if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { - return err - } - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write(`}`) - return out.err - } - - // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - f := v.Float() - var sval string - switch { - case math.IsInf(f, 1): - sval = `"Infinity"` - case math.IsInf(f, -1): - sval = `"-Infinity"` - case math.IsNaN(f): - sval = `"NaN"` - } - if sval != "" { - out.write(sval) - return out.err - } - } - - // Default handling defers to the encoding/json library. - b, err := json.Marshal(v.Interface()) - if err != nil { - return err - } - needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) - if needToQuote { - out.write(`"`) - } - out.write(string(b)) - if needToQuote { - out.write(`"`) - } - return out.err -} - -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. -type Unmarshaler struct { - // Whether to allow messages to contain unknown fields, as opposed to - // failing to unmarshal. - AllowUnknownFields bool - - // A custom URL resolver to use when unmarshaling Any messages from JSON. - // If unset, the default resolution strategy is to extract the - // fully-qualified type name from the type URL and pass that to - // proto.MessageType(string). - AnyResolver AnyResolver -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - inputValue := json.RawMessage{} - if err := dec.Decode(&inputValue); err != nil { - return err - } - if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { - return err - } - return checkRequiredFields(pb) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { - dec := json.NewDecoder(r) - return u.UnmarshalNext(dec, pb) -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. 
-func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - return new(Unmarshaler).UnmarshalNext(dec, pb) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func Unmarshal(r io.Reader, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(r, pb) -} - -// UnmarshalString will populate the fields of a protocol buffer based -// on a JSON string. This function is lenient and will decode any options -// permutations of the related Marshaler. -func UnmarshalString(str string, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) -} - -// unmarshalValue converts/copies a value into the target. -// prop may be nil. -func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { - targetType := target.Type() - - // Allocate memory for pointer fields. - if targetType.Kind() == reflect.Ptr { - // If input value is "null" and target is a pointer type, then the field should be treated as not set - // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. - _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) - if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { - return nil - } - target.Set(reflect.New(targetType.Elem())) - - return u.unmarshalValue(target.Elem(), inputValue, prop) - } - - if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, []byte(inputValue)) - } - - // Handle well-known types that are not pointers. - if w, ok := target.Addr().Interface().(wkt); ok { - switch w.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - return u.unmarshalValue(target.Field(0), inputValue, prop) - case "Any": - // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
- // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see - // https://github.com/golang/go/issues/14493 - var jsonFields map[string]*json.RawMessage - if err := json.Unmarshal(inputValue, &jsonFields); err != nil { - return err - } - - val, ok := jsonFields["@type"] - if !ok || val == nil { - return errors.New("Any JSON doesn't have '@type'") - } - - var turl string - if err := json.Unmarshal([]byte(*val), &turl); err != nil { - return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) - } - target.Field(0).SetString(turl) - - var m proto.Message - var err error - if u.AnyResolver != nil { - m, err = u.AnyResolver.Resolve(turl) - } else { - m, err = defaultResolveAny(turl) - } - if err != nil { - return err - } - - if _, ok := m.(wkt); ok { - val, ok := jsonFields["value"] - if !ok { - return errors.New("Any JSON doesn't have 'value'") - } - - if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) - } - } else { - delete(jsonFields, "@type") - nestedProto, err := json.Marshal(jsonFields) - if err != nil { - return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) - } - - if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) - } - } - - b, err := proto.Marshal(m) - if err != nil { - return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) - } - target.Field(1).SetBytes(b) - - return nil - case "Duration": - unq, err := unquote(string(inputValue)) - if err != nil { - return err - } - - d, err := time.ParseDuration(unq) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - target.Field(0).SetInt(s) - target.Field(1).SetInt(ns) - return nil - case "Timestamp": - unq, err := unquote(string(inputValue)) - if err != nil { - return err - } - - t, err := time.Parse(time.RFC3339Nano, unq) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - - target.Field(0).SetInt(t.Unix()) - target.Field(1).SetInt(int64(t.Nanosecond())) - return nil - case "Struct": - var m map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &m); err != nil { - return fmt.Errorf("bad StructValue: %v", err) - } - - target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) - for k, jv := range m { - pv := &stpb.Value{} - if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { - return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) - } - target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) - } - return nil - case "ListValue": - var s []json.RawMessage - if err := json.Unmarshal(inputValue, &s); err != nil { - return fmt.Errorf("bad ListValue: %v", err) - } - - target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) - for i, sv := range s { - if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { - return err - } - } - return nil - case "Value": - ivStr := string(inputValue) - if ivStr == "null" { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) - } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) - } else if v, err := unquote(ivStr); err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) - } else if v, err := strconv.ParseBool(ivStr); err == nil { - 
target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) - } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { - lv := &stpb.ListValue{} - target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) - return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) - } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { - sv := &stpb.Struct{} - target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) - return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) - } else { - return fmt.Errorf("unrecognized type for Value %q", ivStr) - } - return nil - } - } - - // Handle enums, which have an underlying type of int32, - // and may appear as strings. - // The case of an enum appearing as a number is handled - // at the bottom of this function. - if inputValue[0] == '"' && prop != nil && prop.Enum != "" { - vmap := proto.EnumValueMap(prop.Enum) - // Don't need to do unquoting; valid enum names - // are from a limited character set. - s := inputValue[1 : len(inputValue)-1] - n, ok := vmap[string(s)] - if !ok { - return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) - } - if target.Kind() == reflect.Ptr { // proto2 - target.Set(reflect.New(targetType.Elem())) - target = target.Elem() - } - if targetType.Kind() != reflect.Int32 { - return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) - } - target.SetInt(int64(n)) - return nil - } - - // Handle nested messages. - if targetType.Kind() == reflect.Struct { - var jsonFields map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &jsonFields); err != nil { - return err - } - - consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { - // Be liberal in what names we accept; both orig_name and camelName are okay. - fieldNames := acceptedJSONFieldNames(prop) - - vOrig, okOrig := jsonFields[fieldNames.orig] - vCamel, okCamel := jsonFields[fieldNames.camel] - if !okOrig && !okCamel { - return nil, false - } - // If, for some reason, both are present in the data, favour the camelName. - var raw json.RawMessage - if okOrig { - raw = vOrig - delete(jsonFields, fieldNames.orig) - } - if okCamel { - raw = vCamel - delete(jsonFields, fieldNames.camel) - } - return raw, true - } - - sprops := proto.GetProperties(targetType) - for i := 0; i < target.NumField(); i++ { - ft := target.Type().Field(i) - if strings.HasPrefix(ft.Name, "XXX_") { - continue - } - - valueForField, ok := consumeField(sprops.Prop[i]) - if !ok { - continue - } - - if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { - return err - } - } - // Check for any oneof fields. - if len(jsonFields) > 0 { - for _, oop := range sprops.OneofTypes { - raw, ok := consumeField(oop.Prop) - if !ok { - continue - } - nv := reflect.New(oop.Type.Elem()) - target.Field(oop.Field).Set(nv) - if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { - return err - } - } - } - // Handle proto2 extensions. 
- if len(jsonFields) > 0 { - if ep, ok := target.Addr().Interface().(proto.Message); ok { - for _, ext := range proto.RegisteredExtensions(ep) { - name := fmt.Sprintf("[%s]", ext.Name) - raw, ok := jsonFields[name] - if !ok { - continue - } - delete(jsonFields, name) - nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) - if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { - return err - } - if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { - return err - } - } - } - } - if !u.AllowUnknownFields && len(jsonFields) > 0 { - // Pick any field to be the scapegoat. - var f string - for fname := range jsonFields { - f = fname - break - } - return fmt.Errorf("unknown field %q in %v", f, targetType) - } - return nil - } - - // Handle arrays (which aren't encoded bytes) - if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { - var slc []json.RawMessage - if err := json.Unmarshal(inputValue, &slc); err != nil { - return err - } - if slc != nil { - l := len(slc) - target.Set(reflect.MakeSlice(targetType, l, l)) - for i := 0; i < l; i++ { - if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { - return err - } - } - } - return nil - } - - // Handle maps (whose keys are always strings) - if targetType.Kind() == reflect.Map { - var mp map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &mp); err != nil { - return err - } - if mp != nil { - target.Set(reflect.MakeMap(targetType)) - for ks, raw := range mp { - // Unmarshal map key. The core json library already decoded the key into a - // string, so we handle that specially. Other types were quoted post-serialization. - var k reflect.Value - if targetType.Key().Kind() == reflect.String { - k = reflect.ValueOf(ks) - } else { - k = reflect.New(targetType.Key()).Elem() - var kprop *proto.Properties - if prop != nil && prop.MapKeyProp != nil { - kprop = prop.MapKeyProp - } - if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { - return err - } - } - - // Unmarshal map value. - v := reflect.New(targetType.Elem()).Elem() - var vprop *proto.Properties - if prop != nil && prop.MapValProp != nil { - vprop = prop.MapValProp - } - if err := u.unmarshalValue(v, raw, vprop); err != nil { - return err - } - target.SetMapIndex(k, v) - } - } - return nil - } - - // Non-finite numbers can be encoded as strings. - isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 - if isFloat { - if num, ok := nonFinite[string(inputValue)]; ok { - target.SetFloat(num) - return nil - } - } - - // integers & floats can be encoded as strings. In this case we drop - // the quotes and proceed as normal. - isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || - targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || - targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 - if isNum && strings.HasPrefix(string(inputValue), `"`) { - inputValue = inputValue[1 : len(inputValue)-1] - } - - // Use the encoding/json for parsing other value types. - return json.Unmarshal(inputValue, target.Addr().Interface()) -} - -func unquote(s string) (string, error) { - var ret string - err := json.Unmarshal([]byte(s), &ret) - return ret, err -} - -// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. 
-func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { - var prop proto.Properties - prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) - if origName || prop.JSONName == "" { - prop.JSONName = prop.OrigName - } - return &prop -} - -type fieldNames struct { - orig, camel string -} - -func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { - opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} - if prop.JSONName != "" { - opts.camel = prop.JSONName - } - return opts -} - -// Writer wrapper inspired by https://blog.golang.org/errors-are-values -type errWriter struct { - writer io.Writer - err error -} - -func (w *errWriter) write(str string) { - if w.err != nil { - return - } - _, w.err = w.writer.Write([]byte(str)) -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. -// -// Numeric keys are sorted in numeric order per -// https://developers.google.com/protocol-buffers/docs/proto#maps. -type mapKeys []reflect.Value - -func (s mapKeys) Len() int { return len(s) } -func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s mapKeys) Less(i, j int) bool { - if k := s[i].Kind(); k == s[j].Kind() { - switch k { - case reflect.String: - return s[i].String() < s[j].String() - case reflect.Int32, reflect.Int64: - return s[i].Int() < s[j].Int() - case reflect.Uint32, reflect.Uint64: - return s[i].Uint() < s[j].Uint() - } - } - return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) -} - -// checkRequiredFields returns an error if any required field in the given proto message is not set. -// This function is used by both Marshal and Unmarshal. While required fields only exist in a -// proto2 message, a proto3 message can contain proto2 message(s). -func checkRequiredFields(pb proto.Message) error { - // Most well-known type messages do not contain required fields. The "Any" type may contain - // a message that has required fields. - // - // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value - // field in order to transform that into JSON, and that should have returned an error if a - // required field is not set in the embedded message. - // - // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the - // embedded message to store the serialized message in Any.Value field, and that should have - // returned an error if a required field is not set. - if _, ok := pb.(wkt); ok { - return nil - } - - v := reflect.ValueOf(pb) - // Skip message if it is not a struct pointer. - if v.Kind() != reflect.Ptr { - return nil - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return nil - } - - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - sfield := v.Type().Field(i) - - if sfield.PkgPath != "" { - // blank PkgPath means the field is exported; skip if not exported - continue - } - - if strings.HasPrefix(sfield.Name, "XXX_") { - continue - } - - // Oneof field is an interface implemented by wrapper structs containing the actual oneof - // field, i.e. an interface containing &T{real_value}. 
- if sfield.Tag.Get("protobuf_oneof") != "" { - if field.Kind() != reflect.Interface { - continue - } - v := field.Elem() - if v.Kind() != reflect.Ptr || v.IsNil() { - continue - } - v = v.Elem() - if v.Kind() != reflect.Struct || v.NumField() < 1 { - continue - } - field = v.Field(0) - sfield = v.Type().Field(0) - } - - protoTag := sfield.Tag.Get("protobuf") - if protoTag == "" { - continue - } - var prop proto.Properties - prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) - - switch field.Kind() { - case reflect.Map: - if field.IsNil() { - continue - } - // Check each map value. - keys := field.MapKeys() - for _, k := range keys { - v := field.MapIndex(k) - if err := checkRequiredFieldsInValue(v); err != nil { - return err - } - } - case reflect.Slice: - // Handle non-repeated type, e.g. bytes. - if !prop.Repeated { - if prop.Required && field.IsNil() { - return fmt.Errorf("required field %q is not set", prop.Name) - } - continue - } - - // Handle repeated type. - if field.IsNil() { - continue - } - // Check each slice item. - for i := 0; i < field.Len(); i++ { - v := field.Index(i) - if err := checkRequiredFieldsInValue(v); err != nil { - return err - } - } - case reflect.Ptr: - if field.IsNil() { - if prop.Required { - return fmt.Errorf("required field %q is not set", prop.Name) - } - continue - } - if err := checkRequiredFieldsInValue(field); err != nil { - return err - } - } - } - - // Handle proto2 extensions. - for _, ext := range proto.RegisteredExtensions(pb) { - if !proto.HasExtension(pb, ext) { - continue - } - ep, err := proto.GetExtension(pb, ext) - if err != nil { - return err - } - err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) - if err != nil { - return err - } - } - - return nil -} - -func checkRequiredFieldsInValue(v reflect.Value) error { - if pm, ok := v.Interface().(proto.Message); ok { - return checkRequiredFields(pm) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index 63b0f08bef..d9aa3c42d6 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -186,6 +186,7 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } + // x -= 0x80 << 63 // Always zero. return 0, errOverflow diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c09a..0000000000 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index f9b6e41b3c..d4db5a1c14 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -246,8 +246,7 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { return false } - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) + m1, m2 := e1.value, e2.value if m1 == nil && m2 == nil { // Both have only encoded form. diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index fa88add30a..816a3b9d6c 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -185,25 +185,9 @@ type Extension struct { // extension will have only enc set. When such an extension is // accessed using GetExtension (or GetExtensions) desc and value // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. + desc *ExtensionDesc value interface{} - - // enc is the raw bytes for the extension field. 
- enc []byte + enc []byte } // SetRawExtension is for testing only. @@ -350,7 +334,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // descriptors with the same field number. return nil, errors.New("proto: descriptor conflict") } - return extensionAsLegacyType(e.value), nil + return e.value, nil } if extension.ExtensionType == nil { @@ -365,11 +349,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // Remember the decoded version and drop the encoded version. // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) + e.value = v e.desc = extension e.enc = nil emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil + return e.value, nil } // defaultExtensionValue returns the default value for extension. @@ -504,7 +488,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + return errors.New("proto: bad extension value type") } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension @@ -516,7 +500,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + extmap[extension.Field] = Extension{desc: extension, value: value} return nil } @@ -557,51 +541,3 @@ func RegisterExtension(desc *ExtensionDesc) { func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } - -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - } - return v -} - -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. 
- if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - } - return v -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index fdd328bb7f..75565cc6dc 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -341,6 +341,26 @@ type Message interface { ProtoMessage() } +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; @@ -940,19 +960,13 @@ func isProto3Zero(v reflect.Value) bool { return false } -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true // InternalMessageInfo is a type used internally by generated .pb.go files. // This type is not intended to be used by non-generated code. diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index f48a756761..3b6ca41d5e 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -36,7 +36,13 @@ package proto */ import ( + "bytes" + "encoding/json" "errors" + "fmt" + "reflect" + "sort" + "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -139,9 +145,46 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. 
+func MarshalMessageSet(exts interface{}) ([]byte, error) { + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + + case map[int32]Extension: + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, + } + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + + default: + return nil, errors.New("proto: not an extension map") + } +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { +func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -179,3 +222,93 @@ func unmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. 
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index 94fa9194a8..b6cad90834 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -79,13 +79,10 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { +func toAddrPointer(i *interface{}, isptr bool) pointer { v := reflect.ValueOf(*i) u := reflect.New(v.Type()) u.Elem().Set(v) - if deref { - u = u.Elem() - } return pointer{v: u} } diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index dbfffe071b..d55a335d94 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -85,21 +85,16 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { +func toAddrPointer(i *interface{}, isptr bool) pointer { // Super-tricky - read or get the address of data word of interface value. if isptr { // The interface is of pointer type, thus it is a direct interface. // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } // valToPointer converts v to a pointer. v must be of pointer type. diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 79668ff5c5..50b99b83a8 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -334,6 +334,9 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { + if collectStats { + stats.Chit++ + } return sprop } @@ -343,20 +346,17 @@ func GetProperties(t reflect.Type) *StructProperties { return sprop } -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - // getPropertiesLocked requires that propertiesMu is held. 
func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } return prop } + if collectStats { + stats.Cmiss++ + } prop := new(StructProperties) // in case of recursive protos, fill this in now. @@ -391,14 +391,13 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { // Re-order prop.order. sort.Sort(prop) - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) } - if len(oots) > 0 { + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + _, _, _, oots = om.XXX_OneofFuncs() + // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) for _, oot := range oots { diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go index 5cb11fa955..b16794496f 100644 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -87,7 +87,6 @@ type marshalElemInfo struct { sizer sizer marshaler marshaler isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr } var ( @@ -321,11 +320,8 @@ func (u *marshalInfo) computeMarshalInfo() { // get oneof implementers var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() } n := t.NumField() @@ -411,22 +407,13 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { panic("tag is not an integer") } wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } e = &marshalElemInfo{ wiretag: uint64(tag)<<3 | wt, tagsize: SizeVarint(uint64(tag) << 3), sizer: sizer, marshaler: marshaler, isptr: t.Kind() == reflect.Ptr, - deref: deref, } // update cache @@ -461,7 +448,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. 
fi.isPointer = true fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) @@ -489,6 +476,10 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI } } +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + // wiretype returns the wire encoding of the type. func wiretype(encoding string) uint64 { switch encoding { @@ -2319,8 +2310,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range m.MapKeys() { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) n += siz + SizeVarint(uint64(siz)) + tagsize } @@ -2338,8 +2329,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range keys { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value b = appendVarint(b, tag) siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) b = appendVarint(b, uint64(siz)) @@ -2408,7 +2399,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { // the last time this function was called. 
ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, ei.tagsize) } mu.Unlock() @@ -2443,7 +2434,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2474,7 +2465,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2519,7 +2510,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, 1) // message, tag = 3 (size=1) } mu.Unlock() @@ -2562,7 +2553,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) if !nerr.Merge(err) { return b, err @@ -2600,7 +2591,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) b = append(b, 1<<3|WireEndGroup) if !nerr.Merge(err) { @@ -2630,7 +2621,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, ei.tagsize) } return n @@ -2665,7 +2656,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go index acee2fc529..ebf1caa56a 100644 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { u.computeUnmarshalInfo() } if u.isMessageSet { - return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } var reqMask uint64 // bitmask of required fields we've seen. var errLater error @@ -362,48 +362,46 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { } // Find any types associated with oneof fields. 
- var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + if fn.IsValid() { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } } - } - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } } } - } // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") if fn.IsValid() { if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { panic("a message with extensions, but no extensions field in " + t.Name()) @@ -1950,7 +1948,7 @@ func encodeVarint(b []byte, x uint64) []byte { // If there is an error, it returns 0,0. func decodeVarint(b []byte) (uint64, int) { var x, y uint64 - if len(b) == 0 { + if len(b) <= 0 { goto bad } x = uint64(b[0]) diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go deleted file mode 100644 index 1ded05bbe7..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ /dev/null @@ -1,2887 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: google/protobuf/descriptor.proto - -package descriptor - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type FieldDescriptorProto_Type int32 - -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 - // New in version 2. 
- FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} - -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18, -} - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} - -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} - -func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(value) - return nil -} - -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4, 0} -} - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} - -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} - -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} - -func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(value) - return nil -} - -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. -type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - // etc. 
- FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} - -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} - -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} - -func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(value) - return nil -} - -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{10, 0} -} - -type FieldOptions_CType int32 - -const ( - // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} - -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} - -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} - -func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") - if err != nil { - return err - } - *x = FieldOptions_CType(value) - return nil -} - -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12, 0} -} - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. - FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -var FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", -} - -var FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, -} - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} - -func (x FieldOptions_JSType) String() string { - return proto.EnumName(FieldOptions_JSType_name, int32(x)) -} - -func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") - if err != nil { - return err - } - *x = FieldOptions_JSType(value) - return nil -} - -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12, 1} -} - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. 
-type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 -) - -var MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", -} - -var MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, -} - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} - -func (x MethodOptions_IdempotencyLevel) String() string { - return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) -} - -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(value) - return nil -} - -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{0} -} - -func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) -} -func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) -} -func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorSet.Merge(m, src) -} -func (m *FileDescriptorSet) XXX_Size() int { - return xxx_messageInfo_FileDescriptorSet.Size(m) -} -func (m *FileDescriptorSet) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo - -func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if m != nil { - return m.File - } - return nil -} - -// Describes a complete .proto file. -type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. 
- MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{1} -} - -func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) -} -func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorProto.Merge(m, src) -} -func (m *FileDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FileDescriptorProto.Size(m) -} -func (m *FileDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo - -func (m *FileDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FileDescriptorProto) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *FileDescriptorProto) GetDependency() []string { - if m != nil { - return m.Dependency - } - return nil -} - -func (m *FileDescriptorProto) GetPublicDependency() []int32 { - if m != nil { - return m.PublicDependency - } - return nil -} - -func (m *FileDescriptorProto) GetWeakDependency() []int32 { - if m != nil { - return m.WeakDependency - } - return nil -} - -func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if m != nil { - return m.MessageType - } - return nil -} - -func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if m != nil { - return m.Service - } - return nil -} - -func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *FileDescriptorProto) GetOptions() *FileOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if m != nil { - 
return m.SourceCodeInfo - } - return nil -} - -func (m *FileDescriptorProto) GetSyntax() string { - if m != nil && m.Syntax != nil { - return *m.Syntax - } - return "" -} - -// Describes a message type. -type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2} -} - -func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) -} -func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) -} -func (m *DescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto.Merge(m, src) -} -func (m *DescriptorProto) XXX_Size() int { - return xxx_messageInfo_DescriptorProto.Size(m) -} -func (m *DescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo - -func (m *DescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *DescriptorProto) GetField() []*FieldDescriptorProto { - if m != nil { - return m.Field - } - return nil -} - -func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *DescriptorProto) GetNestedType() []*DescriptorProto { - if m != nil { - return m.NestedType - } - return nil -} - -func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if m != nil { - return m.ExtensionRange - } - return nil -} - -func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if m != nil { - return m.OneofDecl - } - return nil -} - -func (m *DescriptorProto) GetOptions() *MessageOptions { - if m != nil { - return m.Options - } - return nil -} - 
-func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *DescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } -func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2, 0} -} - -func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) -} -func (m *DescriptorProto_ExtensionRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) -} -func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo - -func (m *DescriptorProto_ExtensionRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if m != nil { - return m.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. 
-type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } -func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ReservedRange) ProtoMessage() {} -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2, 1} -} - -func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) -} -func (m *DescriptorProto_ReservedRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) -} -func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo - -func (m *DescriptorProto_ReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -type ExtensionRangeOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{3} -} - -var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ExtensionRangeOptions -} - -func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) -} -func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) -} -func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) -} -func (m *ExtensionRangeOptions) XXX_Size() int { - return xxx_messageInfo_ExtensionRangeOptions.Size(m) -} -func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo - -func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. 
This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4} -} - -func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) -} -func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldDescriptorProto.Merge(m, src) -} -func (m *FieldDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FieldDescriptorProto.Size(m) -} -func (m *FieldDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo - -func (m *FieldDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FieldDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if m != nil && m.Label != nil { - return *m.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (m *FieldDescriptorProto) GetTypeName() string { - if m != nil && m.TypeName != nil { - return *m.TypeName - } - return "" -} - -func (m *FieldDescriptorProto) GetExtendee() string { - if m != nil && m.Extendee != nil { - return *m.Extendee - } - return "" -} - -func (m *FieldDescriptorProto) GetDefaultValue() string { - if m != nil && m.DefaultValue != nil { - return *m.DefaultValue - } - return "" -} - -func (m *FieldDescriptorProto) GetOneofIndex() int32 { - if m != nil && m.OneofIndex != nil { - return *m.OneofIndex - } - return 0 -} - -func (m *FieldDescriptorProto) GetJsonName() string { - if m != nil && m.JsonName != nil { - return *m.JsonName - } - return "" -} - -func (m *FieldDescriptorProto) GetOptions() *FieldOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a oneof. 
-type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{5} -} - -func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) -} -func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) -} -func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofDescriptorProto.Merge(m, src) -} -func (m *OneofDescriptorProto) XXX_Size() int { - return xxx_messageInfo_OneofDescriptorProto.Size(m) -} -func (m *OneofDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo - -func (m *OneofDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. 
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{6} -} - -func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) -} -func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto.Merge(m, src) -} -func (m *EnumDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto.Size(m) -} -func (m *EnumDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo - -func (m *EnumDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if m != nil { - return m.Value - } - return nil -} - -func (m *EnumDescriptorProto) GetOptions() *EnumOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -// Range of reserved numeric values. Reserved values may not be used by -// entries in the same enum. Reserved ranges may not overlap. -// -// Note that this is distinct from DescriptorProto.ReservedRange in that it -// is inclusive such that it can appropriately represent the entire int32 -// domain. 
-type EnumDescriptorProto_EnumReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } -func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} -func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{6, 0} -} - -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo - -func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -// Describes a value within an enum. 
-type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{7} -} - -func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) -} -func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) -} -func (m *EnumValueDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumValueDescriptorProto.Size(m) -} -func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo - -func (m *EnumValueDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumValueDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a service. 
-type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{8} -} - -func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) -} -func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) -} -func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) -} -func (m *ServiceDescriptorProto) XXX_Size() int { - return xxx_messageInfo_ServiceDescriptorProto.Size(m) -} -func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo - -func (m *ServiceDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if m != nil { - return m.Method - } - return nil -} - -func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a method of a service. -type MethodDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. 
- InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{9} -} - -func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) -} -func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) -} -func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodDescriptorProto.Merge(m, src) -} -func (m *MethodDescriptorProto) XXX_Size() int { - return xxx_messageInfo_MethodDescriptorProto.Size(m) -} -func (m *MethodDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo - -const Default_MethodDescriptorProto_ClientStreaming bool = false -const Default_MethodDescriptorProto_ServerStreaming bool = false - -func (m *MethodDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MethodDescriptorProto) GetInputType() string { - if m != nil && m.InputType != nil { - return *m.InputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOutputType() string { - if m != nil && m.OutputType != nil { - return *m.OutputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOptions() *MethodOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *MethodDescriptorProto) GetClientStreaming() bool { - if m != nil && m.ClientStreaming != nil { - return *m.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (m *MethodDescriptorProto) GetServerStreaming() bool { - if m != nil && m.ServerStreaming != nil { - return *m.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` - // Is this file deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` - // Namespace for generated classes; defaults to the package. - CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be used - // for determining the namespace. - PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{10} -} - -var extRange_FileOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FileOptions -} - -func (m *FileOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileOptions.Unmarshal(m, b) -} -func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) -} -func (m *FileOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileOptions.Merge(m, src) -} -func (m *FileOptions) XXX_Size() int { - return xxx_messageInfo_FileOptions.Size(m) -} -func (m *FileOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FileOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FileOptions proto.InternalMessageInfo - -const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaStringCheckUtf8 bool = false -const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED -const Default_FileOptions_CcGenericServices bool = false -const Default_FileOptions_JavaGenericServices bool = false -const Default_FileOptions_PyGenericServices bool = false -const Default_FileOptions_PhpGenericServices bool = false -const Default_FileOptions_Deprecated bool = false -const Default_FileOptions_CcEnableArenas bool = false - -func (m *FileOptions) GetJavaPackage() string { - if m != nil && m.JavaPackage != nil { - return *m.JavaPackage - } - return "" -} - -func (m *FileOptions) GetJavaOuterClassname() string { - if m != nil && m.JavaOuterClassname != nil { - return *m.JavaOuterClassname - } - return "" -} - -func (m *FileOptions) GetJavaMultipleFiles() bool { - if m != nil && m.JavaMultipleFiles != nil { - return *m.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -// Deprecated: Do not use. 
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if m != nil && m.JavaGenerateEqualsAndHash != nil { - return *m.JavaGenerateEqualsAndHash - } - return false -} - -func (m *FileOptions) GetJavaStringCheckUtf8() bool { - if m != nil && m.JavaStringCheckUtf8 != nil { - return *m.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if m != nil && m.OptimizeFor != nil { - return *m.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (m *FileOptions) GetGoPackage() string { - if m != nil && m.GoPackage != nil { - return *m.GoPackage - } - return "" -} - -func (m *FileOptions) GetCcGenericServices() bool { - if m != nil && m.CcGenericServices != nil { - return *m.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (m *FileOptions) GetJavaGenericServices() bool { - if m != nil && m.JavaGenericServices != nil { - return *m.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (m *FileOptions) GetPyGenericServices() bool { - if m != nil && m.PyGenericServices != nil { - return *m.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (m *FileOptions) GetPhpGenericServices() bool { - if m != nil && m.PhpGenericServices != nil { - return *m.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - -func (m *FileOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FileOptions_Deprecated -} - -func (m *FileOptions) GetCcEnableArenas() bool { - if m != nil && m.CcEnableArenas != nil { - return *m.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (m *FileOptions) GetObjcClassPrefix() string { - if m != nil && m.ObjcClassPrefix != nil { - return *m.ObjcClassPrefix - } - return "" -} - -func (m *FileOptions) GetCsharpNamespace() string { - if m != nil && m.CsharpNamespace != nil { - return *m.CsharpNamespace - } - return "" -} - -func (m *FileOptions) GetSwiftPrefix() string { - if m != nil && m.SwiftPrefix != nil { - return *m.SwiftPrefix - } - return "" -} - -func (m *FileOptions) GetPhpClassPrefix() string { - if m != nil && m.PhpClassPrefix != nil { - return *m.PhpClassPrefix - } - return "" -} - -func (m *FileOptions) GetPhpNamespace() string { - if m != nil && m.PhpNamespace != nil { - return *m.PhpNamespace - } - return "" -} - -func (m *FileOptions) GetPhpMetadataNamespace() string { - if m != nil && m.PhpMetadataNamespace != nil { - return *m.PhpMetadataNamespace - } - return "" -} - -func (m *FileOptions) GetRubyPackage() string { - if m != nil && m.RubyPackage != nil { - return *m.RubyPackage - } - return "" -} - -func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MessageOptions struct { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. 
they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{11} -} - -var extRange_MessageOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MessageOptions -} - -func (m *MessageOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageOptions.Unmarshal(m, b) -} -func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) -} -func (m *MessageOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageOptions.Merge(m, src) -} -func (m *MessageOptions) XXX_Size() int { - return xxx_messageInfo_MessageOptions.Size(m) -} -func (m *MessageOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MessageOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageOptions proto.InternalMessageInfo - -const Default_MessageOptions_MessageSetWireFormat bool = false -const Default_MessageOptions_NoStandardDescriptorAccessor bool = false -const Default_MessageOptions_Deprecated bool = false - -func (m *MessageOptions) GetMessageSetWireFormat() bool { - if m != nil && m.MessageSetWireFormat != nil { - return *m.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if m != nil && m.NoStandardDescriptorAccessor != nil { - return *m.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (m *MessageOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (m *MessageOptions) GetMapEntry() bool { - if m != nil && m.MapEntry != nil { - return *m.MapEntry - } - return false -} - -func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - // The jstype option determines the JavaScript type used for values of the - // field. 
The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // For Google-internal migration only. Do not use. - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12} -} - -var extRange_FieldOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FieldOptions -} - -func (m *FieldOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldOptions.Unmarshal(m, b) -} -func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) -} -func (m *FieldOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldOptions.Merge(m, src) -} -func (m *FieldOptions) XXX_Size() int { - return xxx_messageInfo_FieldOptions.Size(m) -} -func (m *FieldOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FieldOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldOptions proto.InternalMessageInfo - -const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING -const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL -const Default_FieldOptions_Lazy bool = false -const Default_FieldOptions_Deprecated bool = false -const Default_FieldOptions_Weak bool = false - -func (m *FieldOptions) GetCtype() FieldOptions_CType { - if m != nil && m.Ctype != nil { - return *m.Ctype - } - return Default_FieldOptions_Ctype -} - -func (m *FieldOptions) GetPacked() bool { - if m != nil && m.Packed != nil { - return *m.Packed - } - return false -} - -func (m *FieldOptions) GetJstype() FieldOptions_JSType { - if m != nil && m.Jstype != nil { - return *m.Jstype - } - return Default_FieldOptions_Jstype -} - -func (m *FieldOptions) GetLazy() bool { - if m != nil && m.Lazy != nil { - return *m.Lazy - } - return Default_FieldOptions_Lazy -} - -func (m *FieldOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (m *FieldOptions) GetWeak() bool { - if m != nil && m.Weak != nil { - return *m.Weak - } - return Default_FieldOptions_Weak -} - -func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type OneofOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{13} -} - -var extRange_OneofOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OneofOptions -} - -func (m *OneofOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofOptions.Unmarshal(m, b) -} -func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) -} -func (m *OneofOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofOptions.Merge(m, src) -} -func (m *OneofOptions) XXX_Size() int { - return xxx_messageInfo_OneofOptions.Size(m) -} -func (m *OneofOptions) XXX_DiscardUnknown() { - xxx_messageInfo_OneofOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofOptions proto.InternalMessageInfo - -func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumOptions struct { - // Set this option to true to allow mapping different tag names to the same - // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{14} -} - -var extRange_EnumOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumOptions -} - -func (m *EnumOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumOptions.Unmarshal(m, b) -} -func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) -} -func (m *EnumOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumOptions.Merge(m, src) -} -func (m *EnumOptions) XXX_Size() int { - return xxx_messageInfo_EnumOptions.Size(m) -} -func (m *EnumOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumOptions proto.InternalMessageInfo - -const Default_EnumOptions_Deprecated bool = false - -func (m *EnumOptions) GetAllowAlias() bool { - if m != nil && m.AllowAlias != nil { - return *m.AllowAlias - } - return false -} - -func (m *EnumOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumOptions_Deprecated -} - -func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{15} -} - -var extRange_EnumValueOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumValueOptions -} - -func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) -} -func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) -} -func (m *EnumValueOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueOptions.Merge(m, src) -} -func (m *EnumValueOptions) XXX_Size() int { - return xxx_messageInfo_EnumValueOptions.Size(m) -} -func (m *EnumValueOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo - -const Default_EnumValueOptions_Deprecated bool = false - -func (m *EnumValueOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{16} -} - -var extRange_ServiceOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ServiceOptions -} - -func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) -} -func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) -} -func (m *ServiceOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceOptions.Merge(m, src) -} -func (m *ServiceOptions) XXX_Size() int { - return xxx_messageInfo_ServiceOptions.Size(m) -} -func (m *ServiceOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo - -const Default_ServiceOptions_Deprecated bool = false - -func (m *ServiceOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{17} -} - -var extRange_MethodOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MethodOptions -} - -func (m *MethodOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodOptions.Unmarshal(m, b) -} -func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) -} -func (m *MethodOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodOptions.Merge(m, src) -} -func (m *MethodOptions) XXX_Size() int { - return xxx_messageInfo_MethodOptions.Size(m) -} -func (m *MethodOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MethodOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodOptions proto.InternalMessageInfo - -const Default_MethodOptions_Deprecated bool = false -const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN - -func (m *MethodOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if m != nil && m.IdempotencyLevel != nil { - return *m.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -type UninterpretedOption struct { - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{18} -} - -func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) -} -func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption.Merge(m, src) -} -func (m *UninterpretedOption) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption.Size(m) -} -func (m *UninterpretedOption) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo - -func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if m != nil { - return m.Name - } - return nil -} - -func (m *UninterpretedOption) GetIdentifierValue() string { - if m != nil && m.IdentifierValue != nil { - return *m.IdentifierValue - } - return "" -} - -func (m *UninterpretedOption) GetPositiveIntValue() uint64 { - if m != nil && m.PositiveIntValue != nil { - return *m.PositiveIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetNegativeIntValue() int64 { - if m != nil && m.NegativeIntValue != nil { - return *m.NegativeIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *UninterpretedOption) GetStringValue() []byte { - if m != nil { - return m.StringValue - } - return nil -} - -func (m *UninterpretedOption) GetAggregateValue() string { - if m != nil && m.AggregateValue != nil { - return *m.AggregateValue - } - return "" -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". 
-type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } -func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption_NamePart) ProtoMessage() {} -func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{18, 0} -} - -func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) -} -func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) -} -func (m *UninterpretedOption_NamePart) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) -} -func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo - -func (m *UninterpretedOption_NamePart) GetNamePart() string { - if m != nil && m.NamePart != nil { - return *m.NamePart - } - return "" -} - -func (m *UninterpretedOption_NamePart) GetIsExtension() bool { - if m != nil && m.IsExtension != nil { - return *m.IsExtension - } - return false -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -type SourceCodeInfo struct { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. 
For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{19} -} - -func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) -} -func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo.Merge(m, src) -} -func (m *SourceCodeInfo) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo.Size(m) -} -func (m *SourceCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo - -func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if m != nil { - return m.Location - } - return nil -} - -type SourceCodeInfo_Location struct { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. 
- Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{19, 0} -} - -func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) -} -func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) -} -func (m *SourceCodeInfo_Location) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo_Location.Size(m) -} -func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo - -func (m *SourceCodeInfo_Location) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { - if m != nil { - return m.Span - } - return nil -} - -func (m *SourceCodeInfo_Location) GetLeadingComments() string { - if m != nil && m.LeadingComments != nil { - return *m.LeadingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetTrailingComments() string { - if m != nil && m.TrailingComments != nil { - return *m.TrailingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if m != nil { - return m.LeadingDetachedComments - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -type GeneratedCodeInfo struct { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{20} -} - -func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) -} -func (m *GeneratedCodeInfo) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo.Size(m) -} -func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo - -func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { - if m != nil { - return m.Annotation - } - return nil -} - -type GeneratedCodeInfo_Annotation struct { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. - SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } -func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} -func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{20, 0} -} - -func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) -} -func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo - -func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if m != nil && m.SourceFile != nil { - return *m.SourceFile - } - return "" -} - -func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if m != nil && m.Begin != nil { - return *m.Begin - } - return 0 -} - -func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func init() { - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) - proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) - proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") - proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") - proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") - proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") - proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") - proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") - proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") - proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") - proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") - proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") - proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") - proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") - proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") - proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") - proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") - proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") - proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") - proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") - proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") - proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") - proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") - proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") - proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") - proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") - proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") - proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") - proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") -} - -func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) } - -var fileDescriptor_e5baabe45344a177 = []byte{ - // 2589 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6, - 0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca, - 0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee, - 0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca, - 0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80, - 0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c, - 0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73, - 0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04, - 0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a, - 0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0, - 0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52, - 0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90, - 0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88, - 0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd, - 0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c, - 0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6, - 0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf, - 0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79, - 0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 
0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11, - 0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53, - 0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84, - 0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4, - 0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e, - 0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9, - 0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2, - 0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02, - 0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6, - 0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d, - 0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33, - 0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79, - 0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a, - 0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a, - 0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c, - 0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56, - 0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06, - 0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1, - 0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23, - 0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01, - 0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f, - 0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d, - 0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58, - 0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32, - 0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e, - 0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb, - 0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11, - 0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02, - 0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f, - 0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31, - 0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac, - 0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f, - 0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d, - 0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac, - 0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e, - 0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72, - 0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 
0xd6, 0x9d, 0x5b, - 0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2, - 0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e, - 0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94, - 0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a, - 0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61, - 0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0, - 0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5, - 0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a, - 0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a, - 0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8, - 0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64, - 0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c, - 0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8, - 0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb, - 0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2, - 0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33, - 0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4, - 0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15, - 0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39, - 0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9, - 0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41, - 0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40, - 0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35, - 0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0, - 0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4, - 0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53, - 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e, - 0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d, - 0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e, - 0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f, - 0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36, - 0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60, - 0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1, - 0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d, - 0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45, - 0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58, - 0xbe, 
0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3, - 0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c, - 0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87, - 0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49, - 0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26, - 0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39, - 0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c, - 0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77, - 0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83, - 0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87, - 0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20, - 0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6, - 0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e, - 0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c, - 0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0, - 0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8, - 0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0, - 0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3, - 0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1, - 0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53, - 0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42, - 0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b, - 0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15, - 0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae, - 0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2, - 0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35, - 0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0, - 0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82, - 0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a, - 0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c, - 0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5, - 0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29, - 0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca, - 0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd, - 0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18, - 0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb, - 0x39, 0x37, 0xa5, 0x1f, 0xd2, 
0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae, - 0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6, - 0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a, - 0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47, - 0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8, - 0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e, - 0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0, - 0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70, - 0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41, - 0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e, - 0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c, - 0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47, - 0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2, - 0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66, - 0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2, - 0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0, - 0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9, - 0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40, - 0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c, - 0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe, - 0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d, - 0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99, - 0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69, - 0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2, - 0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda, - 0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86, - 0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24, - 0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8, - 0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25, - 0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e, - 0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee, - 0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39, - 0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f, - 0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto 
b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto deleted file mode 100644 index ed08fcbc54..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto +++ /dev/null @@ -1,883 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// Based on original Protocol Buffers design by -// Sanjay Ghemawat, Jeff Dean, and others. -// -// The messages in this file describe the definitions found in .proto files. -// A valid .proto file can be translated directly to a FileDescriptorProto -// without any other information (e.g. without reading its imports). - - -syntax = "proto2"; - -package google.protobuf; -option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DescriptorProtos"; -option csharp_namespace = "Google.Protobuf.Reflection"; -option objc_class_prefix = "GPB"; -option cc_enable_arenas = true; - -// descriptor.proto must be optimized for speed because reflection-based -// algorithms don't work during bootstrapping. -option optimize_for = SPEED; - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -message FileDescriptorSet { - repeated FileDescriptorProto file = 1; -} - -// Describes a complete .proto file. -message FileDescriptorProto { - optional string name = 1; // file name, relative to root of source tree - optional string package = 2; // e.g. "foo", "foo.bar", etc. - - // Names of files imported by this file. - repeated string dependency = 3; - // Indexes of the public imported files in the dependency list above. - repeated int32 public_dependency = 10; - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. 
- repeated int32 weak_dependency = 11; - - // All top-level definitions in this file. - repeated DescriptorProto message_type = 4; - repeated EnumDescriptorProto enum_type = 5; - repeated ServiceDescriptorProto service = 6; - repeated FieldDescriptorProto extension = 7; - - optional FileOptions options = 8; - - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - optional SourceCodeInfo source_code_info = 9; - - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - optional string syntax = 12; -} - -// Describes a message type. -message DescriptorProto { - optional string name = 1; - - repeated FieldDescriptorProto field = 2; - repeated FieldDescriptorProto extension = 6; - - repeated DescriptorProto nested_type = 3; - repeated EnumDescriptorProto enum_type = 4; - - message ExtensionRange { - optional int32 start = 1; - optional int32 end = 2; - - optional ExtensionRangeOptions options = 3; - } - repeated ExtensionRange extension_range = 5; - - repeated OneofDescriptorProto oneof_decl = 8; - - optional MessageOptions options = 7; - - // Range of reserved tag numbers. Reserved tag numbers may not be used by - // fields or extension ranges in the same message. Reserved ranges may - // not overlap. - message ReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. - } - repeated ReservedRange reserved_range = 9; - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - repeated string reserved_name = 10; -} - -message ExtensionRangeOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -// Describes a field within a message. -message FieldDescriptorProto { - enum Type { - // 0 is reserved for errors. - // Order is weird for historical reasons. - TYPE_DOUBLE = 1; - TYPE_FLOAT = 2; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - TYPE_INT64 = 3; - TYPE_UINT64 = 4; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - TYPE_INT32 = 5; - TYPE_FIXED64 = 6; - TYPE_FIXED32 = 7; - TYPE_BOOL = 8; - TYPE_STRING = 9; - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - TYPE_GROUP = 10; - TYPE_MESSAGE = 11; // Length-delimited aggregate. - - // New in version 2. - TYPE_BYTES = 12; - TYPE_UINT32 = 13; - TYPE_ENUM = 14; - TYPE_SFIXED32 = 15; - TYPE_SFIXED64 = 16; - TYPE_SINT32 = 17; // Uses ZigZag encoding. - TYPE_SINT64 = 18; // Uses ZigZag encoding. - }; - - enum Label { - // 0 is reserved for errors - LABEL_OPTIONAL = 1; - LABEL_REQUIRED = 2; - LABEL_REPEATED = 3; - }; - - optional string name = 1; - optional int32 number = 3; - optional Label label = 4; - - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - optional Type type = 5; - - // For message and enum types, this is the name of the type. 
If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - optional string type_name = 6; - - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - optional string extendee = 2; - - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - optional string default_value = 7; - - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - optional int32 oneof_index = 9; - - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - optional string json_name = 10; - - optional FieldOptions options = 8; -} - -// Describes a oneof. -message OneofDescriptorProto { - optional string name = 1; - optional OneofOptions options = 2; -} - -// Describes an enum type. -message EnumDescriptorProto { - optional string name = 1; - - repeated EnumValueDescriptorProto value = 2; - - optional EnumOptions options = 3; - - // Range of reserved numeric values. Reserved values may not be used by - // entries in the same enum. Reserved ranges may not overlap. - // - // Note that this is distinct from DescriptorProto.ReservedRange in that it - // is inclusive such that it can appropriately represent the entire int32 - // domain. - message EnumReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Inclusive. - } - - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - repeated EnumReservedRange reserved_range = 4; - - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - repeated string reserved_name = 5; -} - -// Describes a value within an enum. -message EnumValueDescriptorProto { - optional string name = 1; - optional int32 number = 2; - - optional EnumValueOptions options = 3; -} - -// Describes a service. -message ServiceDescriptorProto { - optional string name = 1; - repeated MethodDescriptorProto method = 2; - - optional ServiceOptions options = 3; -} - -// Describes a method of a service. -message MethodDescriptorProto { - optional string name = 1; - - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. - optional string input_type = 2; - optional string output_type = 3; - - optional MethodOptions options = 4; - - // Identifies if client streams multiple client messages - optional bool client_streaming = 5 [default=false]; - // Identifies if server streams multiple server messages - optional bool server_streaming = 6 [default=false]; -} - - -// =================================================================== -// Options - -// Each of the definitions above may have "options" attached. 
These are -// just annotations which may cause code to be generated slightly differently -// or may contain hints for code that manipulates protocol messages. -// -// Clients may define custom options as extensions of the *Options messages. -// These extensions may not yet be known at parsing time, so the parser cannot -// store the values in them. Instead it stores them in a field in the *Options -// message called uninterpreted_option. This field must have the same name -// across all *Options messages. We then use this field to populate the -// extensions when we build a descriptor, at which point all protos have been -// parsed and so all extensions are known. -// -// Extension numbers for custom options may be chosen as follows: -// * For options which will only be used within a single application or -// organization, or for experimental options, use field numbers 50000 -// through 99999. It is up to you to ensure that you do not use the -// same number for multiple options. -// * For options which will be published and used publicly by multiple -// independent entities, e-mail protobuf-global-extension-registry@google.com -// to reserve extension numbers. Simply provide your project name (e.g. -// Objective-C plugin) and your project website (if available) -- there's no -// need to explain how you intend to use them. Usually you only need one -// extension number. You can declare multiple options with only one extension -// number by putting them in a sub-message. See the Custom Options section of -// the docs for examples: -// https://developers.google.com/protocol-buffers/docs/proto#options -// If this turns out to be popular, a web service will be set up -// to automatically assign option numbers. - - -message FileOptions { - - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - optional string java_package = 1; - - - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - optional string java_outer_classname = 8; - - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - optional bool java_multiple_files = 10 [default=false]; - - // This option does nothing. - optional bool java_generate_equals_and_hash = 20 [deprecated=true]; - - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - optional bool java_string_check_utf8 = 27 [default=false]; - - - // Generated classes can be optimized for speed or code size. 
- enum OptimizeMode { - SPEED = 1; // Generate complete code for parsing, serialization, - // etc. - CODE_SIZE = 2; // Use ReflectionOps to implement these methods. - LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. - } - optional OptimizeMode optimize_for = 9 [default=SPEED]; - - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - optional string go_package = 11; - - - - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - optional bool cc_generic_services = 16 [default=false]; - optional bool java_generic_services = 17 [default=false]; - optional bool py_generic_services = 18 [default=false]; - optional bool php_generic_services = 42 [default=false]; - - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - optional bool deprecated = 23 [default=false]; - - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - optional bool cc_enable_arenas = 31 [default=false]; - - - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - optional string objc_class_prefix = 36; - - // Namespace for generated classes; defaults to the package. - optional string csharp_namespace = 37; - - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - optional string swift_prefix = 39; - - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - optional string php_class_prefix = 40; - - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - optional string php_namespace = 41; - - - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be used - // for determining the namespace. - optional string php_metadata_namespace = 44; - - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. 
- optional string ruby_package = 45; - - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. - // See the documentation for the "Options" section above. - extensions 1000 to max; - - reserved 38; -} - -message MessageOptions { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - optional bool message_set_wire_format = 1 [default=false]; - - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - optional bool no_standard_descriptor_accessor = 2 [default=false]; - - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - optional bool deprecated = 3 [default=false]; - - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - optional bool map_entry = 7; - - reserved 8; // javalite_serializable - reserved 9; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message FieldOptions { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - optional CType ctype = 1 [default = STRING]; - enum CType { - // Default mode. 
- STRING = 0; - - CORD = 1; - - STRING_PIECE = 2; - } - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - optional bool packed = 2; - - // The jstype option determines the JavaScript type used for values of the - // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - optional JSType jstype = 6 [default = JS_NORMAL]; - enum JSType { - // Use the default type. - JS_NORMAL = 0; - - // Use JavaScript strings. - JS_STRING = 1; - - // Use JavaScript numbers. - JS_NUMBER = 2; - } - - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - optional bool lazy = 5 [default=false]; - - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - optional bool deprecated = 3 [default=false]; - - // For Google-internal migration only. Do not use. 
- optional bool weak = 10 [default=false]; - - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; - - reserved 4; // removed jtype -} - -message OneofOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumOptions { - - // Set this option to true to allow mapping different tag names to the same - // value. - optional bool allow_alias = 2; - - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - optional bool deprecated = 3 [default=false]; - - reserved 5; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumValueOptions { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - optional bool deprecated = 1 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message ServiceOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - optional bool deprecated = 33 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message MethodOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - optional bool deprecated = 33 [default=false]; - - // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - // or neither? HTTP based RPC implementation may choose GET verb for safe - // methods, and PUT verb for idempotent methods instead of the default POST. 
- enum IdempotencyLevel { - IDEMPOTENCY_UNKNOWN = 0; - NO_SIDE_EFFECTS = 1; // implies idempotent - IDEMPOTENT = 2; // idempotent, but may have side effects - } - optional IdempotencyLevel idempotency_level = - 34 [default=IDEMPOTENCY_UNKNOWN]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -message UninterpretedOption { - // The name of the uninterpreted option. Each string represents a segment in - // a dot-separated name. is_extension is true iff a segment represents an - // extension (denoted with parentheses in options specs in .proto files). - // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents - // "foo.(bar.baz).qux". - message NamePart { - required string name_part = 1; - required bool is_extension = 2; - } - repeated NamePart name = 2; - - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. - optional string identifier_value = 3; - optional uint64 positive_int_value = 4; - optional int64 negative_int_value = 5; - optional double double_value = 6; - optional bytes string_value = 7; - optional string aggregate_value = 8; -} - -// =================================================================== -// Optional source code info - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -message SourceCodeInfo { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. 
- // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - repeated Location location = 1; - message Location { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - repeated int32 path = 1 [packed=true]; - - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - repeated int32 span = 2 [packed=true]; - - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. 
- // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - optional string leading_comments = 3; - optional string trailing_comments = 4; - repeated string leading_detached_comments = 6; - } -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -message GeneratedCodeInfo { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - repeated Annotation annotation = 1; - message Annotation { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - repeated int32 path = 1 [packed=true]; - - // Identifies the filesystem path to the original source .proto. - optional string source_file = 2; - - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - optional int32 begin = 3; - - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - optional int32 end = 4; - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go deleted file mode 100644 index 6f4a902b5b..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go +++ /dev/null @@ -1,2806 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* - The code generator for the plugin for the Google protocol buffer compiler. - It generates Go code from the protocol buffer description files read by the - main routine. -*/ -package generator - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/sha256" - "encoding/hex" - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/printer" - "go/token" - "log" - "os" - "path" - "sort" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap" - - "github.com/golang/protobuf/protoc-gen-go/descriptor" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" -) - -// generatedCodeVersion indicates a version of the generated code. -// It is incremented whenever an incompatibility between the generated code and -// proto package is introduced; the generated code references -// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). -const generatedCodeVersion = 3 - -// A Plugin provides functionality to add to the output during Go code generation, -// such as to produce RPC stubs. -type Plugin interface { - // Name identifies the plugin. - Name() string - // Init is called once after data structures are built but before - // code generation begins. - Init(g *Generator) - // Generate produces the code generated by the plugin for this file, - // except for the imports, by calling the generator's methods P, In, and Out. - Generate(file *FileDescriptor) - // GenerateImports produces the import declarations for this file. - // It is called after Generate. - GenerateImports(file *FileDescriptor) -} - -var plugins []Plugin - -// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated. -// It is typically called during initialization. -func RegisterPlugin(p Plugin) { - plugins = append(plugins, p) -} - -// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf". -type GoImportPath string - -func (p GoImportPath) String() string { return strconv.Quote(string(p)) } - -// A GoPackageName is the name of a Go package. e.g., "protobuf". -type GoPackageName string - -// Each type we import as a protocol buffer (other than FileDescriptorProto) needs -// a pointer to the FileDescriptorProto that represents it. These types achieve that -// wrapping by placing each Proto inside a struct with the pointer to its File. The -// structs have the same names as their contents, with "Proto" removed. -// FileDescriptor is used to store the things that it points to. - -// The file and package name method are common to messages and enums. -type common struct { - file *FileDescriptor // File this object comes from. -} - -// GoImportPath is the import path of the Go package containing the type. 
-func (c *common) GoImportPath() GoImportPath { - return c.file.importPath -} - -func (c *common) File() *FileDescriptor { return c.file } - -func fileIsProto3(file *descriptor.FileDescriptorProto) bool { - return file.GetSyntax() == "proto3" -} - -func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) } - -// Descriptor represents a protocol buffer message. -type Descriptor struct { - common - *descriptor.DescriptorProto - parent *Descriptor // The containing message, if any. - nested []*Descriptor // Inner messages, if any. - enums []*EnumDescriptor // Inner enums, if any. - ext []*ExtensionDescriptor // Extensions, if any. - typename []string // Cached typename vector. - index int // The index into the container, whether the file or another message. - path string // The SourceCodeInfo path as comma-separated integers. - group bool -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. -func (d *Descriptor) TypeName() []string { - if d.typename != nil { - return d.typename - } - n := 0 - for parent := d; parent != nil; parent = parent.parent { - n++ - } - s := make([]string, n) - for parent := d; parent != nil; parent = parent.parent { - n-- - s[n] = parent.GetName() - } - d.typename = s - return s -} - -// EnumDescriptor describes an enum. If it's at top level, its parent will be nil. -// Otherwise it will be the descriptor of the message in which it is defined. -type EnumDescriptor struct { - common - *descriptor.EnumDescriptorProto - parent *Descriptor // The containing message, if any. - typename []string // Cached typename vector. - index int // The index into the container, whether the file or a message. - path string // The SourceCodeInfo path as comma-separated integers. -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. -func (e *EnumDescriptor) TypeName() (s []string) { - if e.typename != nil { - return e.typename - } - name := e.GetName() - if e.parent == nil { - s = make([]string, 1) - } else { - pname := e.parent.TypeName() - s = make([]string, len(pname)+1) - copy(s, pname) - } - s[len(s)-1] = name - e.typename = s - return s -} - -// Everything but the last element of the full type name, CamelCased. -// The values of type Foo.Bar are call Foo_value1... not Foo_Bar_value1... . -func (e *EnumDescriptor) prefix() string { - if e.parent == nil { - // If the enum is not part of a message, the prefix is just the type name. - return CamelCase(*e.Name) + "_" - } - typeName := e.TypeName() - return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_" -} - -// The integer value of the named constant in this enumerated type. -func (e *EnumDescriptor) integerValueAsString(name string) string { - for _, c := range e.Value { - if c.GetName() == name { - return fmt.Sprint(c.GetNumber()) - } - } - log.Fatal("cannot find value for enum constant") - return "" -} - -// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil. -// Otherwise it will be the descriptor of the message in which it is defined. -type ExtensionDescriptor struct { - common - *descriptor.FieldDescriptorProto - parent *Descriptor // The containing message, if any. -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. 
-func (e *ExtensionDescriptor) TypeName() (s []string) { - name := e.GetName() - if e.parent == nil { - // top-level extension - s = make([]string, 1) - } else { - pname := e.parent.TypeName() - s = make([]string, len(pname)+1) - copy(s, pname) - } - s[len(s)-1] = name - return s -} - -// DescName returns the variable name used for the generated descriptor. -func (e *ExtensionDescriptor) DescName() string { - // The full type name. - typeName := e.TypeName() - // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix. - for i, s := range typeName { - typeName[i] = CamelCase(s) - } - return "E_" + strings.Join(typeName, "_") -} - -// ImportedDescriptor describes a type that has been publicly imported from another file. -type ImportedDescriptor struct { - common - o Object -} - -func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() } - -// FileDescriptor describes an protocol buffer descriptor file (.proto). -// It includes slices of all the messages and enums defined within it. -// Those slices are constructed by WrapTypes. -type FileDescriptor struct { - *descriptor.FileDescriptorProto - desc []*Descriptor // All the messages defined in this file. - enum []*EnumDescriptor // All the enums defined in this file. - ext []*ExtensionDescriptor // All the top-level extensions defined in this file. - imp []*ImportedDescriptor // All types defined in files publicly imported by this file. - - // Comments, stored as a map of path (comma-separated integers) to the comment. - comments map[string]*descriptor.SourceCodeInfo_Location - - // The full list of symbols that are exported, - // as a map from the exported object to its symbols. - // This is used for supporting public imports. - exported map[Object][]symbol - - importPath GoImportPath // Import path of this file's package. - packageName GoPackageName // Name of this file's Go package. - - proto3 bool // whether to generate proto3 code for this file -} - -// VarName is the variable name we'll use in the generated code to refer -// to the compressed bytes of this descriptor. It is not exported, so -// it is only valid inside the generated package. -func (d *FileDescriptor) VarName() string { - h := sha256.Sum256([]byte(d.GetName())) - return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8])) -} - -// goPackageOption interprets the file's go_package option. -// If there is no go_package, it returns ("", "", false). -// If there's a simple name, it returns ("", pkg, true). -// If the option implies an import path, it returns (impPath, pkg, true). -func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) { - opt := d.GetOptions().GetGoPackage() - if opt == "" { - return "", "", false - } - // A semicolon-delimited suffix delimits the import path and package name. - sc := strings.Index(opt, ";") - if sc >= 0 { - return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true - } - // The presence of a slash implies there's an import path. - slash := strings.LastIndex(opt, "/") - if slash >= 0 { - return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true - } - return "", cleanPackageName(opt), true -} - -// goFileName returns the output name for the generated Go file. 
-func (d *FileDescriptor) goFileName(pathType pathType) string { - name := *d.Name - if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { - name = name[:len(name)-len(ext)] - } - name += ".pb.go" - - if pathType == pathTypeSourceRelative { - return name - } - - // Does the file have a "go_package" option? - // If it does, it may override the filename. - if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { - // Replace the existing dirname with the declared import path. - _, name = path.Split(name) - name = path.Join(string(impPath), name) - return name - } - - return name -} - -func (d *FileDescriptor) addExport(obj Object, sym symbol) { - d.exported[obj] = append(d.exported[obj], sym) -} - -// symbol is an interface representing an exported Go symbol. -type symbol interface { - // GenerateAlias should generate an appropriate alias - // for the symbol from the named package. - GenerateAlias(g *Generator, filename string, pkg GoPackageName) -} - -type messageSymbol struct { - sym string - hasExtensions, isMessageSet bool - oneofTypes []string -} - -type getterSymbol struct { - name string - typ string - typeName string // canonical name in proto world; empty for proto.Message and similar - genType bool // whether typ contains a generated type (message/group/enum) -} - -func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { - g.P("// ", ms.sym, " from public import ", filename) - g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) - for _, name := range ms.oneofTypes { - g.P("type ", name, " = ", pkg, ".", name) - } -} - -type enumSymbol struct { - name string - proto3 bool // Whether this came from a proto3 file. -} - -func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { - s := es.name - g.P("// ", s, " from public import ", filename) - g.P("type ", s, " = ", pkg, ".", s) - g.P("var ", s, "_name = ", pkg, ".", s, "_name") - g.P("var ", s, "_value = ", pkg, ".", s, "_value") -} - -type constOrVarSymbol struct { - sym string - typ string // either "const" or "var" - cast string // if non-empty, a type cast is required (used for enums) -} - -func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { - v := string(pkg) + "." + cs.sym - if cs.cast != "" { - v = cs.cast + "(" + v + ")" - } - g.P(cs.typ, " ", cs.sym, " = ", v) -} - -// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. -type Object interface { - GoImportPath() GoImportPath - TypeName() []string - File() *FileDescriptor -} - -// Generator is the type whose methods generate the output, stored in the associated response structure. -type Generator struct { - *bytes.Buffer - - Request *plugin.CodeGeneratorRequest // The input. - Response *plugin.CodeGeneratorResponse // The output. - - Param map[string]string // Command-line parameters. - PackageImportPath string // Go import path of the package we're generating code for - ImportPrefix string // String to prefix to imported package file names. - ImportMap map[string]string // Mapping from .proto file name to import path - - Pkg map[string]string // The names under which we import support packages - - outputImportPath GoImportPath // Package we're generating code for. - allFiles []*FileDescriptor // All files in the tree - allFilesByName map[string]*FileDescriptor // All files by filename. - genFiles []*FileDescriptor // Those files we will generate output for. 
- file *FileDescriptor // The file we are compiling now. - packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. - usedPackages map[GoImportPath]bool // Packages used in current file. - usedPackageNames map[GoPackageName]bool // Package names used in the current file. - addedImports map[GoImportPath]bool // Additional imports to emit. - typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. - init []string // Lines to emit in the init function. - indent string - pathType pathType // How to generate output filenames. - writeOutput bool - annotateCode bool // whether to store annotations - annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store -} - -type pathType int - -const ( - pathTypeImport pathType = iota - pathTypeSourceRelative -) - -// New creates a new generator and allocates the request and response protobufs. -func New() *Generator { - g := new(Generator) - g.Buffer = new(bytes.Buffer) - g.Request = new(plugin.CodeGeneratorRequest) - g.Response = new(plugin.CodeGeneratorResponse) - return g -} - -// Error reports a problem, including an error, and exits the program. -func (g *Generator) Error(err error, msgs ...string) { - s := strings.Join(msgs, " ") + ":" + err.Error() - log.Print("protoc-gen-go: error:", s) - os.Exit(1) -} - -// Fail reports a problem and exits the program. -func (g *Generator) Fail(msgs ...string) { - s := strings.Join(msgs, " ") - log.Print("protoc-gen-go: error:", s) - os.Exit(1) -} - -// CommandLineParameters breaks the comma-separated list of key=value pairs -// in the parameter (a member of the request protobuf) into a key/value map. -// It then sets file name mappings defined by those entries. -func (g *Generator) CommandLineParameters(parameter string) { - g.Param = make(map[string]string) - for _, p := range strings.Split(parameter, ",") { - if i := strings.Index(p, "="); i < 0 { - g.Param[p] = "" - } else { - g.Param[p[0:i]] = p[i+1:] - } - } - - g.ImportMap = make(map[string]string) - pluginList := "none" // Default list of plugin names to enable (empty means all). - for k, v := range g.Param { - switch k { - case "import_prefix": - g.ImportPrefix = v - case "import_path": - g.PackageImportPath = v - case "paths": - switch v { - case "import": - g.pathType = pathTypeImport - case "source_relative": - g.pathType = pathTypeSourceRelative - default: - g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) - } - case "plugins": - pluginList = v - case "annotate_code": - if v == "true" { - g.annotateCode = true - } - default: - if len(k) > 0 && k[0] == 'M' { - g.ImportMap[k[1:]] = v - } - } - } - if pluginList != "" { - // Amend the set of plugins. - enabled := make(map[string]bool) - for _, name := range strings.Split(pluginList, "+") { - enabled[name] = true - } - var nplugins []Plugin - for _, p := range plugins { - if enabled[p.Name()] { - nplugins = append(nplugins, p) - } - } - plugins = nplugins - } -} - -// DefaultPackageName returns the package name printed for the object. -// If its file is in a different package, it returns the package name we're using for this file, plus ".". -// Otherwise it returns the empty string. -func (g *Generator) DefaultPackageName(obj Object) string { - importPath := obj.GoImportPath() - if importPath == g.outputImportPath { - return "" - } - return string(g.GoPackageName(importPath)) + "." -} - -// GoPackageName returns the name used for a package. 
-func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { - if name, ok := g.packageNames[importPath]; ok { - return name - } - name := cleanPackageName(baseName(string(importPath))) - for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ { - name = orig + GoPackageName(strconv.Itoa(i)) - } - g.packageNames[importPath] = name - g.usedPackageNames[name] = true - return name -} - -// AddImport adds a package to the generated file's import section. -// It returns the name used for the package. -func (g *Generator) AddImport(importPath GoImportPath) GoPackageName { - g.addedImports[importPath] = true - return g.GoPackageName(importPath) -} - -var globalPackageNames = map[GoPackageName]bool{ - "fmt": true, - "math": true, - "proto": true, -} - -// Create and remember a guaranteed unique package name. Pkg is the candidate name. -// The FileDescriptor parameter is unused. -func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { - name := cleanPackageName(pkg) - for i, orig := 1, name; globalPackageNames[name]; i++ { - name = orig + GoPackageName(strconv.Itoa(i)) - } - globalPackageNames[name] = true - return string(name) -} - -var isGoKeyword = map[string]bool{ - "break": true, - "case": true, - "chan": true, - "const": true, - "continue": true, - "default": true, - "else": true, - "defer": true, - "fallthrough": true, - "for": true, - "func": true, - "go": true, - "goto": true, - "if": true, - "import": true, - "interface": true, - "map": true, - "package": true, - "range": true, - "return": true, - "select": true, - "struct": true, - "switch": true, - "type": true, - "var": true, -} - -var isGoPredeclaredIdentifier = map[string]bool{ - "append": true, - "bool": true, - "byte": true, - "cap": true, - "close": true, - "complex": true, - "complex128": true, - "complex64": true, - "copy": true, - "delete": true, - "error": true, - "false": true, - "float32": true, - "float64": true, - "imag": true, - "int": true, - "int16": true, - "int32": true, - "int64": true, - "int8": true, - "iota": true, - "len": true, - "make": true, - "new": true, - "nil": true, - "panic": true, - "print": true, - "println": true, - "real": true, - "recover": true, - "rune": true, - "string": true, - "true": true, - "uint": true, - "uint16": true, - "uint32": true, - "uint64": true, - "uint8": true, - "uintptr": true, -} - -func cleanPackageName(name string) GoPackageName { - name = strings.Map(badToUnderscore, name) - // Identifier must not be keyword or predeclared identifier: insert _. - if isGoKeyword[name] { - name = "_" + name - } - // Identifier must not begin with digit: insert _. - if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { - name = "_" + name - } - return GoPackageName(name) -} - -// defaultGoPackage returns the package name to use, -// derived from the import path of the package we're building code for. -func (g *Generator) defaultGoPackage() GoPackageName { - p := g.PackageImportPath - if i := strings.LastIndex(p, "/"); i >= 0 { - p = p[i+1:] - } - return cleanPackageName(p) -} - -// SetPackageNames sets the package name for this run. -// The package name must agree across all files being generated. -// It also defines unique package names for all imported files. 
-func (g *Generator) SetPackageNames() { - g.outputImportPath = g.genFiles[0].importPath - - defaultPackageNames := make(map[GoImportPath]GoPackageName) - for _, f := range g.genFiles { - if _, p, ok := f.goPackageOption(); ok { - defaultPackageNames[f.importPath] = p - } - } - for _, f := range g.genFiles { - if _, p, ok := f.goPackageOption(); ok { - // Source file: option go_package = "quux/bar"; - f.packageName = p - } else if p, ok := defaultPackageNames[f.importPath]; ok { - // A go_package option in another file in the same package. - // - // This is a poor choice in general, since every source file should - // contain a go_package option. Supported mainly for historical - // compatibility. - f.packageName = p - } else if p := g.defaultGoPackage(); p != "" { - // Command-line: import_path=quux/bar. - // - // The import_path flag sets a package name for files which don't - // contain a go_package option. - f.packageName = p - } else if p := f.GetPackage(); p != "" { - // Source file: package quux.bar; - f.packageName = cleanPackageName(p) - } else { - // Source filename. - f.packageName = cleanPackageName(baseName(f.GetName())) - } - } - - // Check that all files have a consistent package name and import path. - for _, f := range g.genFiles[1:] { - if a, b := g.genFiles[0].importPath, f.importPath; a != b { - g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) - } - if a, b := g.genFiles[0].packageName, f.packageName; a != b { - g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) - } - } - - // Names of support packages. These never vary (if there are conflicts, - // we rename the conflicting package), so this could be removed someday. - g.Pkg = map[string]string{ - "fmt": "fmt", - "math": "math", - "proto": "proto", - } -} - -// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos -// and FileDescriptorProtos into file-referenced objects within the Generator. -// It also creates the list of files to generate and so should be called before GenerateAllFiles. -func (g *Generator) WrapTypes() { - g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) - g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) - genFileNames := make(map[string]bool) - for _, n := range g.Request.FileToGenerate { - genFileNames[n] = true - } - for _, f := range g.Request.ProtoFile { - fd := &FileDescriptor{ - FileDescriptorProto: f, - exported: make(map[Object][]symbol), - proto3: fileIsProto3(f), - } - // The import path may be set in a number of ways. - if substitution, ok := g.ImportMap[f.GetName()]; ok { - // Command-line: M=foo.proto=quux/bar. - // - // Explicit mapping of source file to import path. - fd.importPath = GoImportPath(substitution) - } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { - // Command-line: import_path=quux/bar. - // - // The import_path flag sets the import path for every file that - // we generate code for. - fd.importPath = GoImportPath(g.PackageImportPath) - } else if p, _, _ := fd.goPackageOption(); p != "" { - // Source file: option go_package = "quux/bar"; - // - // The go_package option sets the import path. Most users should use this. - fd.importPath = p - } else { - // Source filename. - // - // Last resort when nothing else is available. 
- fd.importPath = GoImportPath(path.Dir(f.GetName())) - } - // We must wrap the descriptors before we wrap the enums - fd.desc = wrapDescriptors(fd) - g.buildNestedDescriptors(fd.desc) - fd.enum = wrapEnumDescriptors(fd, fd.desc) - g.buildNestedEnums(fd.desc, fd.enum) - fd.ext = wrapExtensions(fd) - extractComments(fd) - g.allFiles = append(g.allFiles, fd) - g.allFilesByName[f.GetName()] = fd - } - for _, fd := range g.allFiles { - fd.imp = wrapImported(fd, g) - } - - g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) - for _, fileName := range g.Request.FileToGenerate { - fd := g.allFilesByName[fileName] - if fd == nil { - g.Fail("could not find file named", fileName) - } - g.genFiles = append(g.genFiles, fd) - } -} - -// Scan the descriptors in this file. For each one, build the slice of nested descriptors -func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { - for _, desc := range descs { - if len(desc.NestedType) != 0 { - for _, nest := range descs { - if nest.parent == desc { - desc.nested = append(desc.nested, nest) - } - } - if len(desc.nested) != len(desc.NestedType) { - g.Fail("internal error: nesting failure for", desc.GetName()) - } - } - } -} - -func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { - for _, desc := range descs { - if len(desc.EnumType) != 0 { - for _, enum := range enums { - if enum.parent == desc { - desc.enums = append(desc.enums, enum) - } - } - if len(desc.enums) != len(desc.EnumType) { - g.Fail("internal error: enum nesting failure for", desc.GetName()) - } - } - } -} - -// Construct the Descriptor -func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor { - d := &Descriptor{ - common: common{file}, - DescriptorProto: desc, - parent: parent, - index: index, - } - if parent == nil { - d.path = fmt.Sprintf("%d,%d", messagePath, index) - } else { - d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) - } - - // The only way to distinguish a group from a message is whether - // the containing message has a TYPE_GROUP field that matches. - if parent != nil { - parts := d.TypeName() - if file.Package != nil { - parts = append([]string{*file.Package}, parts...) - } - exp := "." 
+ strings.Join(parts, ".") - for _, field := range parent.Field { - if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { - d.group = true - break - } - } - } - - for _, field := range desc.Extension { - d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) - } - - return d -} - -// Return a slice of all the Descriptors defined within this file -func wrapDescriptors(file *FileDescriptor) []*Descriptor { - sl := make([]*Descriptor, 0, len(file.MessageType)+10) - for i, desc := range file.MessageType { - sl = wrapThisDescriptor(sl, desc, nil, file, i) - } - return sl -} - -// Wrap this Descriptor, recursively -func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor { - sl = append(sl, newDescriptor(desc, parent, file, index)) - me := sl[len(sl)-1] - for i, nested := range desc.NestedType { - sl = wrapThisDescriptor(sl, nested, me, file, i) - } - return sl -} - -// Construct the EnumDescriptor -func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor { - ed := &EnumDescriptor{ - common: common{file}, - EnumDescriptorProto: desc, - parent: parent, - index: index, - } - if parent == nil { - ed.path = fmt.Sprintf("%d,%d", enumPath, index) - } else { - ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) - } - return ed -} - -// Return a slice of all the EnumDescriptors defined within this file -func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor { - sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) - // Top-level enums. - for i, enum := range file.EnumType { - sl = append(sl, newEnumDescriptor(enum, nil, file, i)) - } - // Enums within messages. Enums within embedded messages appear in the outer-most message. - for _, nested := range descs { - for i, enum := range nested.EnumType { - sl = append(sl, newEnumDescriptor(enum, nested, file, i)) - } - } - return sl -} - -// Return a slice of all the top-level ExtensionDescriptors defined within this file. -func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor { - var sl []*ExtensionDescriptor - for _, field := range file.Extension { - sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) - } - return sl -} - -// Return a slice of all the types that are publicly imported into this file. -func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) { - for _, index := range file.PublicDependency { - df := g.fileByName(file.Dependency[index]) - for _, d := range df.desc { - if d.GetOptions().GetMapEntry() { - continue - } - sl = append(sl, &ImportedDescriptor{common{file}, d}) - } - for _, e := range df.enum { - sl = append(sl, &ImportedDescriptor{common{file}, e}) - } - for _, ext := range df.ext { - sl = append(sl, &ImportedDescriptor{common{file}, ext}) - } - } - return -} - -func extractComments(file *FileDescriptor) { - file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) - for _, loc := range file.GetSourceCodeInfo().GetLocation() { - if loc.LeadingComments == nil { - continue - } - var p []string - for _, n := range loc.Path { - p = append(p, strconv.Itoa(int(n))) - } - file.comments[strings.Join(p, ",")] = loc - } -} - -// BuildTypeNameMap builds the map from fully qualified type names to objects. -// The key names for the map come from the input data, which puts a period at the beginning. 
-// It should be called after SetPackageNames and before GenerateAllFiles. -func (g *Generator) BuildTypeNameMap() { - g.typeNameToObject = make(map[string]Object) - for _, f := range g.allFiles { - // The names in this loop are defined by the proto world, not us, so the - // package name may be empty. If so, the dotted package name of X will - // be ".X"; otherwise it will be ".pkg.X". - dottedPkg := "." + f.GetPackage() - if dottedPkg != "." { - dottedPkg += "." - } - for _, enum := range f.enum { - name := dottedPkg + dottedSlice(enum.TypeName()) - g.typeNameToObject[name] = enum - } - for _, desc := range f.desc { - name := dottedPkg + dottedSlice(desc.TypeName()) - g.typeNameToObject[name] = desc - } - } -} - -// ObjectNamed, given a fully-qualified input type name as it appears in the input data, -// returns the descriptor for the message or enum with that name. -func (g *Generator) ObjectNamed(typeName string) Object { - o, ok := g.typeNameToObject[typeName] - if !ok { - g.Fail("can't find object with type", typeName) - } - return o -} - -// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated. -type AnnotatedAtoms struct { - source string - path string - atoms []interface{} -} - -// Annotate records the file name and proto AST path of a list of atoms -// so that a later call to P can emit a link from each atom to its origin. -func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms { - return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms} -} - -// printAtom prints the (atomic, non-annotation) argument to the generated output. -func (g *Generator) printAtom(v interface{}) { - switch v := v.(type) { - case string: - g.WriteString(v) - case *string: - g.WriteString(*v) - case bool: - fmt.Fprint(g, v) - case *bool: - fmt.Fprint(g, *v) - case int: - fmt.Fprint(g, v) - case *int32: - fmt.Fprint(g, *v) - case *int64: - fmt.Fprint(g, *v) - case float64: - fmt.Fprint(g, v) - case *float64: - fmt.Fprint(g, *v) - case GoPackageName: - g.WriteString(string(v)) - case GoImportPath: - g.WriteString(strconv.Quote(string(v))) - default: - g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) - } -} - -// P prints the arguments to the generated output. It handles strings and int32s, plus -// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit -// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode -// is true). -func (g *Generator) P(str ...interface{}) { - if !g.writeOutput { - return - } - g.WriteString(g.indent) - for _, v := range str { - switch v := v.(type) { - case *AnnotatedAtoms: - begin := int32(g.Len()) - for _, v := range v.atoms { - g.printAtom(v) - } - if g.annotateCode { - end := int32(g.Len()) - var path []int32 - for _, token := range strings.Split(v.path, ",") { - val, err := strconv.ParseInt(token, 10, 32) - if err != nil { - g.Fail("could not parse proto AST path: ", err.Error()) - } - path = append(path, int32(val)) - } - g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{ - Path: path, - SourceFile: &v.source, - Begin: &begin, - End: &end, - }) - } - default: - g.printAtom(v) - } - } - g.WriteByte('\n') -} - -// addInitf stores the given statement to be printed inside the file's init function. -// The statement is given as a format specifier and arguments. 
-func (g *Generator) addInitf(stmt string, a ...interface{}) { - g.init = append(g.init, fmt.Sprintf(stmt, a...)) -} - -// In Indents the output one tab stop. -func (g *Generator) In() { g.indent += "\t" } - -// Out unindents the output one tab stop. -func (g *Generator) Out() { - if len(g.indent) > 0 { - g.indent = g.indent[1:] - } -} - -// GenerateAllFiles generates the output for all the files we're outputting. -func (g *Generator) GenerateAllFiles() { - // Initialize the plugins - for _, p := range plugins { - p.Init(g) - } - // Generate the output. The generator runs for every file, even the files - // that we don't generate output for, so that we can collate the full list - // of exported symbols to support public imports. - genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles)) - for _, file := range g.genFiles { - genFileMap[file] = true - } - for _, file := range g.allFiles { - g.Reset() - g.annotations = nil - g.writeOutput = genFileMap[file] - g.generate(file) - if !g.writeOutput { - continue - } - fname := file.goFileName(g.pathType) - g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(fname), - Content: proto.String(g.String()), - }) - if g.annotateCode { - // Store the generated code annotations in text, as the protoc plugin protocol requires that - // strings contain valid UTF-8. - g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(file.goFileName(g.pathType) + ".meta"), - Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})), - }) - } - } -} - -// Run all the plugins associated with the file. -func (g *Generator) runPlugins(file *FileDescriptor) { - for _, p := range plugins { - p.Generate(file) - } -} - -// Fill the response protocol buffer with the generated output for all the files we're -// supposed to generate. -func (g *Generator) generate(file *FileDescriptor) { - g.file = file - g.usedPackages = make(map[GoImportPath]bool) - g.packageNames = make(map[GoImportPath]GoPackageName) - g.usedPackageNames = make(map[GoPackageName]bool) - g.addedImports = make(map[GoImportPath]bool) - for name := range globalPackageNames { - g.usedPackageNames[name] = true - } - - g.P("// This is a compile-time assertion to ensure that this generated file") - g.P("// is compatible with the proto package it is being compiled against.") - g.P("// A compilation error at this line likely means your copy of the") - g.P("// proto package needs to be updated.") - g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") - g.P() - - for _, td := range g.file.imp { - g.generateImported(td) - } - for _, enum := range g.file.enum { - g.generateEnum(enum) - } - for _, desc := range g.file.desc { - // Don't generate virtual messages for maps. - if desc.GetOptions().GetMapEntry() { - continue - } - g.generateMessage(desc) - } - for _, ext := range g.file.ext { - g.generateExtension(ext) - } - g.generateInitFunction() - g.generateFileDescriptor(file) - - // Run the plugins before the imports so we know which imports are necessary. - g.runPlugins(file) - - // Generate header and imports last, though they appear first in the output. - rem := g.Buffer - remAnno := g.annotations - g.Buffer = new(bytes.Buffer) - g.annotations = nil - g.generateHeader() - g.generateImports() - if !g.writeOutput { - return - } - // Adjust the offsets for annotations displaced by the header and imports. 
- for _, anno := range remAnno { - *anno.Begin += int32(g.Len()) - *anno.End += int32(g.Len()) - g.annotations = append(g.annotations, anno) - } - g.Write(rem.Bytes()) - - // Reformat generated code and patch annotation locations. - fset := token.NewFileSet() - original := g.Bytes() - if g.annotateCode { - // make a copy independent of g; we'll need it after Reset. - original = append([]byte(nil), original...) - } - fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments) - if err != nil { - // Print out the bad code with line numbers. - // This should never happen in practice, but it can while changing generated code, - // so consider this a debugging aid. - var src bytes.Buffer - s := bufio.NewScanner(bytes.NewReader(original)) - for line := 1; s.Scan(); line++ { - fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) - } - g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) - } - ast.SortImports(fset, fileAST) - g.Reset() - err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST) - if err != nil { - g.Fail("generated Go source code could not be reformatted:", err.Error()) - } - if g.annotateCode { - m, err := remap.Compute(original, g.Bytes()) - if err != nil { - g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error()) - } - for _, anno := range g.annotations { - new, ok := m.Find(int(*anno.Begin), int(*anno.End)) - if !ok { - g.Fail("span in formatted generated Go source code could not be mapped back to the original code") - } - *anno.Begin = int32(new.Pos) - *anno.End = int32(new.End) - } - } -} - -// Generate the header, including package definition -func (g *Generator) generateHeader() { - g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") - if g.file.GetOptions().GetDeprecated() { - g.P("// ", g.file.Name, " is a deprecated file.") - } else { - g.P("// source: ", g.file.Name) - } - g.P() - g.PrintComments(strconv.Itoa(packagePath)) - g.P() - g.P("package ", g.file.packageName) - g.P() -} - -// deprecationComment is the standard comment added to deprecated -// messages, fields, enums, and enum values. -var deprecationComment = "// Deprecated: Do not use." - -// PrintComments prints any comments from the source .proto file. -// The path is a comma-separated list of integers. -// It returns an indication of whether any comments were printed. -// See descriptor.proto for its format. -func (g *Generator) PrintComments(path string) bool { - if !g.writeOutput { - return false - } - if c, ok := g.makeComments(path); ok { - g.P(c) - return true - } - return false -} - -// makeComments generates the comment string for the field, no "\n" at the end -func (g *Generator) makeComments(path string) (string, bool) { - loc, ok := g.file.comments[path] - if !ok { - return "", false - } - w := new(bytes.Buffer) - nl := "" - for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") { - fmt.Fprintf(w, "%s//%s", nl, line) - nl = "\n" - } - return w.String(), true -} - -func (g *Generator) fileByName(filename string) *FileDescriptor { - return g.allFilesByName[filename] -} - -// weak returns whether the ith import of the current file is a weak import. 
-func (g *Generator) weak(i int32) bool { - for _, j := range g.file.WeakDependency { - if j == i { - return true - } - } - return false -} - -// Generate the imports -func (g *Generator) generateImports() { - imports := make(map[GoImportPath]GoPackageName) - for i, s := range g.file.Dependency { - fd := g.fileByName(s) - importPath := fd.importPath - // Do not import our own package. - if importPath == g.file.importPath { - continue - } - // Do not import weak imports. - if g.weak(int32(i)) { - continue - } - // Do not import a package twice. - if _, ok := imports[importPath]; ok { - continue - } - // We need to import all the dependencies, even if we don't reference them, - // because other code and tools depend on having the full transitive closure - // of protocol buffer types in the binary. - packageName := g.GoPackageName(importPath) - if _, ok := g.usedPackages[importPath]; !ok { - packageName = "_" - } - imports[importPath] = packageName - } - for importPath := range g.addedImports { - imports[importPath] = g.GoPackageName(importPath) - } - // We almost always need a proto import. Rather than computing when we - // do, which is tricky when there's a plugin, just import it and - // reference it later. The same argument applies to the fmt and math packages. - g.P("import (") - g.P(g.Pkg["fmt"] + ` "fmt"`) - g.P(g.Pkg["math"] + ` "math"`) - g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto") - for importPath, packageName := range imports { - g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath) - } - g.P(")") - g.P() - // TODO: may need to worry about uniqueness across plugins - for _, p := range plugins { - p.GenerateImports(g.file) - g.P() - } - g.P("// Reference imports to suppress errors if they are not otherwise used.") - g.P("var _ = ", g.Pkg["proto"], ".Marshal") - g.P("var _ = ", g.Pkg["fmt"], ".Errorf") - g.P("var _ = ", g.Pkg["math"], ".Inf") - g.P() -} - -func (g *Generator) generateImported(id *ImportedDescriptor) { - df := id.o.File() - filename := *df.Name - if df.importPath == g.file.importPath { - // Don't generate type aliases for files in the same Go package as this one. - return - } - if !supportTypeAliases { - g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename)) - } - g.usedPackages[df.importPath] = true - - for _, sym := range df.exported[id.o] { - sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath)) - } - - g.P() -} - -// Generate the enum definitions for this EnumDescriptor. -func (g *Generator) generateEnum(enum *EnumDescriptor) { - // The full type name - typeName := enum.TypeName() - // The full type name, CamelCased. 
- ccTypeName := CamelCaseSlice(typeName) - ccPrefix := enum.prefix() - - deprecatedEnum := "" - if enum.GetOptions().GetDeprecated() { - deprecatedEnum = deprecationComment - } - g.PrintComments(enum.path) - g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum) - g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) - g.P("const (") - for i, e := range enum.Value { - etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i) - g.PrintComments(etorPath) - - deprecatedValue := "" - if e.GetOptions().GetDeprecated() { - deprecatedValue = deprecationComment - } - - name := ccPrefix + *e.Name - g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue) - g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) - } - g.P(")") - g.P() - g.P("var ", ccTypeName, "_name = map[int32]string{") - generated := make(map[int32]bool) // avoid duplicate values - for _, e := range enum.Value { - duplicate := "" - if _, present := generated[*e.Number]; present { - duplicate = "// Duplicate value: " - } - g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") - generated[*e.Number] = true - } - g.P("}") - g.P() - g.P("var ", ccTypeName, "_value = map[string]int32{") - for _, e := range enum.Value { - g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") - } - g.P("}") - g.P() - - if !enum.proto3() { - g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") - g.P("p := new(", ccTypeName, ")") - g.P("*p = x") - g.P("return p") - g.P("}") - g.P() - } - - g.P("func (x ", ccTypeName, ") String() string {") - g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") - g.P("}") - g.P() - - if !enum.proto3() { - g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") - g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) - g.P("if err != nil {") - g.P("return err") - g.P("}") - g.P("*x = ", ccTypeName, "(value)") - g.P("return nil") - g.P("}") - g.P() - } - - var indexes []string - for m := enum.parent; m != nil; m = m.parent { - // XXX: skip groups? - indexes = append([]string{strconv.Itoa(m.index)}, indexes...) - } - indexes = append(indexes, strconv.Itoa(enum.index)) - g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {") - g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") - g.P("}") - g.P() - if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { - g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) - g.P() - } - - g.generateEnumRegistration(enum) -} - -// The tag is a string like "varint,2,opt,name=fieldname,def=7" that -// identifies details of the field for the protocol buffer marshaling and unmarshaling -// code. The fields are: -// wire encoding -// protocol tag number -// opt,req,rep for optional, required, or repeated -// packed whether the encoding is "packed" (optional; repeated primitives only) -// name= the original declared name -// enum= the name of the enum type if it is an enum-typed field. -// proto3 if this field is in a proto3 message -// def= string representation of the default value, if any. -// The default value must be in a representation that can be used at run-time -// to generate the default value. Thus bools become 0 and 1, for instance. 
-func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { - optrepreq := "" - switch { - case isOptional(field): - optrepreq = "opt" - case isRequired(field): - optrepreq = "req" - case isRepeated(field): - optrepreq = "rep" - } - var defaultValue string - if dv := field.DefaultValue; dv != nil { // set means an explicit default - defaultValue = *dv - // Some types need tweaking. - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - if defaultValue == "true" { - defaultValue = "1" - } else { - defaultValue = "0" - } - case descriptor.FieldDescriptorProto_TYPE_STRING, - descriptor.FieldDescriptorProto_TYPE_BYTES: - // Nothing to do. Quoting is done for the whole tag. - case descriptor.FieldDescriptorProto_TYPE_ENUM: - // For enums we need to provide the integer constant. - obj := g.ObjectNamed(field.GetTypeName()) - if id, ok := obj.(*ImportedDescriptor); ok { - // It is an enum that was publicly imported. - // We need the underlying type. - obj = id.o - } - enum, ok := obj.(*EnumDescriptor) - if !ok { - log.Printf("obj is a %T", obj) - if id, ok := obj.(*ImportedDescriptor); ok { - log.Printf("id.o is a %T", id.o) - } - g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) - } - defaultValue = enum.integerValueAsString(defaultValue) - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { - if f, err := strconv.ParseFloat(defaultValue, 32); err == nil { - defaultValue = fmt.Sprint(float32(f)) - } - } - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { - if f, err := strconv.ParseFloat(defaultValue, 64); err == nil { - defaultValue = fmt.Sprint(f) - } - } - } - defaultValue = ",def=" + defaultValue - } - enum := "" - if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { - // We avoid using obj.GoPackageName(), because we want to use the - // original (proto-world) package name. - obj := g.ObjectNamed(field.GetTypeName()) - if id, ok := obj.(*ImportedDescriptor); ok { - obj = id.o - } - enum = ",enum=" - if pkg := obj.File().GetPackage(); pkg != "" { - enum += pkg + "." - } - enum += CamelCaseSlice(obj.TypeName()) - } - packed := "" - if (field.Options != nil && field.Options.GetPacked()) || - // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: - // "In proto3, repeated fields of scalar numeric types use packed encoding by default." - (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && - isRepeated(field) && isScalar(field)) { - packed = ",packed" - } - fieldName := field.GetName() - name := fieldName - if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { - // We must use the type name for groups instead of - // the field name to preserve capitalization. - // type_name in FieldDescriptorProto is fully-qualified, - // but we only want the local part. - name = *field.TypeName - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[i+1:] - } - } - if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name { - // TODO: escaping might be needed, in which case - // perhaps this should be in its own "json" tag. 
- name += ",json=" + json - } - name = ",name=" + name - if message.proto3() { - name += ",proto3" - } - oneof := "" - if field.OneofIndex != nil { - oneof = ",oneof" - } - return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s", - wiretype, - field.GetNumber(), - optrepreq, - packed, - name, - enum, - oneof, - defaultValue)) -} - -func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { - switch typ { - case descriptor.FieldDescriptorProto_TYPE_GROUP: - return false - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - return false - case descriptor.FieldDescriptorProto_TYPE_BYTES: - return false - } - return true -} - -// TypeName is the printed name appropriate for an item. If the object is in the current file, -// TypeName drops the package name and underscores the rest. -// Otherwise the object is from another package; and the result is the underscored -// package name followed by the item name. -// The result always has an initial capital. -func (g *Generator) TypeName(obj Object) string { - return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) -} - -// GoType returns a string representing the type name, and the wire type -func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { - // TODO: Options. - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - typ, wire = "float64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - typ, wire = "float32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_INT64: - typ, wire = "int64", "varint" - case descriptor.FieldDescriptorProto_TYPE_UINT64: - typ, wire = "uint64", "varint" - case descriptor.FieldDescriptorProto_TYPE_INT32: - typ, wire = "int32", "varint" - case descriptor.FieldDescriptorProto_TYPE_UINT32: - typ, wire = "uint32", "varint" - case descriptor.FieldDescriptorProto_TYPE_FIXED64: - typ, wire = "uint64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_FIXED32: - typ, wire = "uint32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_BOOL: - typ, wire = "bool", "varint" - case descriptor.FieldDescriptorProto_TYPE_STRING: - typ, wire = "string", "bytes" - case descriptor.FieldDescriptorProto_TYPE_GROUP: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = "*"+g.TypeName(desc), "group" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = "*"+g.TypeName(desc), "bytes" - case descriptor.FieldDescriptorProto_TYPE_BYTES: - typ, wire = "[]byte", "bytes" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = g.TypeName(desc), "varint" - case descriptor.FieldDescriptorProto_TYPE_SFIXED32: - typ, wire = "int32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_SFIXED64: - typ, wire = "int64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_SINT32: - typ, wire = "int32", "zigzag32" - case descriptor.FieldDescriptorProto_TYPE_SINT64: - typ, wire = "int64", "zigzag64" - default: - g.Fail("unknown type for", field.GetName()) - } - if isRepeated(field) { - typ = "[]" + typ - } else if message != nil && message.proto3() { - return - } else if field.OneofIndex != nil && message != nil { - return - } else if needsStar(*field.Type) { - typ = "*" + typ - } - return -} - -func (g *Generator) RecordTypeUse(t string) { - if _, ok := g.typeNameToObject[t]; !ok { - return - } - importPath := g.ObjectNamed(t).GoImportPath() - if importPath == g.outputImportPath { - // Don't record use of objects in our 
package. - return - } - g.AddImport(importPath) - g.usedPackages[importPath] = true -} - -// Method names that may be generated. Fields with these names get an -// underscore appended. Any change to this set is a potential incompatible -// API change because it changes generated field names. -var methodNames = [...]string{ - "Reset", - "String", - "ProtoMessage", - "Marshal", - "Unmarshal", - "ExtensionRangeArray", - "ExtensionMap", - "Descriptor", -} - -// Names of messages in the `google.protobuf` package for which -// we will generate XXX_WellKnownType methods. -var wellKnownTypes = map[string]bool{ - "Any": true, - "Duration": true, - "Empty": true, - "Struct": true, - "Timestamp": true, - - "Value": true, - "ListValue": true, - "DoubleValue": true, - "FloatValue": true, - "Int64Value": true, - "UInt64Value": true, - "Int32Value": true, - "UInt32Value": true, - "BoolValue": true, - "StringValue": true, - "BytesValue": true, -} - -// getterDefault finds the default value for the field to return from a getter, -// regardless of if it's a built in default or explicit from the source. Returns e.g. "nil", `""`, "Default_MessageType_FieldName" -func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string { - if isRepeated(field) { - return "nil" - } - if def := field.GetDefaultValue(); def != "" { - defaultConstant := g.defaultConstantName(goMessageType, field.GetName()) - if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES { - return defaultConstant - } - return "append([]byte(nil), " + defaultConstant + "...)" - } - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - return "false" - case descriptor.FieldDescriptorProto_TYPE_STRING: - return `""` - case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES: - return "nil" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - obj := g.ObjectNamed(field.GetTypeName()) - var enum *EnumDescriptor - if id, ok := obj.(*ImportedDescriptor); ok { - // The enum type has been publicly imported. - enum, _ = id.o.(*EnumDescriptor) - } else { - enum, _ = obj.(*EnumDescriptor) - } - if enum == nil { - log.Printf("don't know how to generate getter for %s", field.GetName()) - return "nil" - } - if len(enum.Value) == 0 { - return "0 // empty enum" - } - first := enum.Value[0].GetName() - return g.DefaultPackageName(obj) + enum.prefix() + first - default: - return "0" - } -} - -// defaultConstantName builds the name of the default constant from the message -// type name and the untouched field name, e.g. "Default_MessageType_FieldName" -func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string { - return "Default_" + goMessageType + "_" + CamelCase(protoFieldName) -} - -// The different types of fields in a message and how to actually print them -// Most of the logic for generateMessage is in the methods of these types. -// -// Note that the content of the field is irrelevant, a simpleField can contain -// anything from a scalar to a group (which is just a message). -// -// Extension fields (and message sets) are however handled separately. -// -// simpleField - a field that is neiter weak nor oneof, possibly repeated -// oneofField - field containing list of subfields: -// - oneofSubField - a field within the oneof - -// msgCtx contains the context for the generator functions. -type msgCtx struct { - goName string // Go struct name of the message, e.g. 
MessageName - message *Descriptor // The descriptor for the message -} - -// fieldCommon contains data common to all types of fields. -type fieldCommon struct { - goName string // Go name of field, e.g. "FieldName" or "Descriptor_" - protoName string // Name of field in proto language, e.g. "field_name" or "descriptor" - getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_" - goType string // The Go type as a string, e.g. "*int32" or "*OtherMessage" - tags string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"` - fullPath string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0" -} - -// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor". -func (f *fieldCommon) getProtoName() string { - return f.protoName -} - -// getGoType returns the go type of the field as a string, e.g. "*int32". -func (f *fieldCommon) getGoType() string { - return f.goType -} - -// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated. -type simpleField struct { - fieldCommon - protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" - protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 - deprecated string // Deprecation comment, if any, e.g. "// Deprecated: Do not use." - getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" - protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5" - comment string // The full comment for the field, e.g. "// Useful information" -} - -// decl prints the declaration of the field in the struct (if any). -func (f *simpleField) decl(g *Generator, mc *msgCtx) { - g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated) -} - -// getter prints the getter for the field. -func (f *simpleField) getter(g *Generator, mc *msgCtx) { - star := "" - tname := f.goType - if needsStar(f.protoType) && tname[0] == '*' { - tname = tname[1:] - star = "*" - } - if f.deprecated != "" { - g.P(f.deprecated) - } - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {") - if f.getterDef == "nil" { // Simpler getter - g.P("if m != nil {") - g.P("return m." + f.goName) - g.P("}") - g.P("return nil") - g.P("}") - g.P() - return - } - if mc.message.proto3() { - g.P("if m != nil {") - } else { - g.P("if m != nil && m." + f.goName + " != nil {") - } - g.P("return " + star + "m." + f.goName) - g.P("}") - g.P("return ", f.getterDef) - g.P("}") - g.P() -} - -// setter prints the setter method of the field. -func (f *simpleField) setter(g *Generator, mc *msgCtx) { - // No setter for regular fields yet -} - -// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5". -func (f *simpleField) getProtoDef() string { - return f.protoDef -} - -// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". -func (f *simpleField) getProtoTypeName() string { - return f.protoTypeName -} - -// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. -func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type { - return f.protoType -} - -// oneofSubFields are kept slize held by each oneofField. 
They do not appear in the top level slize of fields for the message. -type oneofSubField struct { - fieldCommon - protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" - protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 - oneofTypeName string // Type name of the enclosing struct, e.g. "MessageName_FieldName" - fieldNumber int // Actual field number, as defined in proto, e.g. 12 - getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" - protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5" - deprecated string // Deprecation comment, if any. -} - -// typedNil prints a nil casted to the pointer to this field. -// - for XXX_OneofWrappers -func (f *oneofSubField) typedNil(g *Generator) { - g.P("(*", f.oneofTypeName, ")(nil),") -} - -// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5". -func (f *oneofSubField) getProtoDef() string { - return f.protoDef -} - -// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". -func (f *oneofSubField) getProtoTypeName() string { - return f.protoTypeName -} - -// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. -func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type { - return f.protoType -} - -// oneofField represents the oneof on top level. -// The alternative fields within the oneof are represented by oneofSubField. -type oneofField struct { - fieldCommon - subFields []*oneofSubField // All the possible oneof fields - comment string // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\" -} - -// decl prints the declaration of the field in the struct (if any). -func (f *oneofField) decl(g *Generator, mc *msgCtx) { - comment := f.comment - for _, sf := range f.subFields { - comment += "//\t*" + sf.oneofTypeName + "\n" - } - g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`") -} - -// getter for a oneof field will print additional discriminators and interfaces for the oneof, -// also it prints all the getters for the sub fields. 
-func (f *oneofField) getter(g *Generator, mc *msgCtx) { - // The discriminator type - g.P("type ", f.goType, " interface {") - g.P(f.goType, "()") - g.P("}") - g.P() - // The subField types, fulfilling the discriminator type contract - for _, sf := range f.subFields { - g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {") - g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`") - g.P("}") - g.P() - } - for _, sf := range f.subFields { - g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}") - g.P() - } - // Getter for the oneof field - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {") - g.P("if m != nil { return m.", f.goName, " }") - g.P("return nil") - g.P("}") - g.P() - // Getters for each oneof - for _, sf := range f.subFields { - if sf.deprecated != "" { - g.P(sf.deprecated) - } - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {") - g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {") - g.P("return x.", sf.goName) - g.P("}") - g.P("return ", sf.getterDef) - g.P("}") - g.P() - } -} - -// setter prints the setter method of the field. -func (f *oneofField) setter(g *Generator, mc *msgCtx) { - // No setters for oneof yet -} - -// topLevelField interface implemented by all types of fields on the top level (not oneofSubField). -type topLevelField interface { - decl(g *Generator, mc *msgCtx) // print declaration within the struct - getter(g *Generator, mc *msgCtx) // print getter - setter(g *Generator, mc *msgCtx) // print setter if applicable -} - -// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField). -type defField interface { - getProtoDef() string // default value explicitly stated in the proto file, e.g "yoshi" or "5" - getProtoName() string // proto name of a field, e.g. "field_name" or "descriptor" - getGoType() string // go type of the field as a string, e.g. "*int32" - getProtoTypeName() string // protobuf type name for the field, e.g. ".google.protobuf.Duration" - getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 -} - -// generateDefaultConstants adds constants for default values if needed, which is only if the default value is. -// explicit in the proto. -func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) { - // Collect fields that can have defaults - dFields := []defField{} - for _, pf := range topLevelFields { - if f, ok := pf.(*oneofField); ok { - for _, osf := range f.subFields { - dFields = append(dFields, osf) - } - continue - } - dFields = append(dFields, pf.(defField)) - } - for _, df := range dFields { - def := df.getProtoDef() - if def == "" { - continue - } - fieldname := g.defaultConstantName(mc.goName, df.getProtoName()) - typename := df.getGoType() - if typename[0] == '*' { - typename = typename[1:] - } - kind := "const " - switch { - case typename == "bool": - case typename == "string": - def = strconv.Quote(def) - case typename == "[]byte": - def = "[]byte(" + strconv.Quote(unescape(def)) + ")" - kind = "var " - case def == "inf", def == "-inf", def == "nan": - // These names are known to, and defined by, the protocol language. 
- switch def { - case "inf": - def = "math.Inf(1)" - case "-inf": - def = "math.Inf(-1)" - case "nan": - def = "math.NaN()" - } - if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT { - def = "float32(" + def + ")" - } - kind = "var " - case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT: - if f, err := strconv.ParseFloat(def, 32); err == nil { - def = fmt.Sprint(float32(f)) - } - case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE: - if f, err := strconv.ParseFloat(def, 64); err == nil { - def = fmt.Sprint(f) - } - case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM: - // Must be an enum. Need to construct the prefixed name. - obj := g.ObjectNamed(df.getProtoTypeName()) - var enum *EnumDescriptor - if id, ok := obj.(*ImportedDescriptor); ok { - // The enum type has been publicly imported. - enum, _ = id.o.(*EnumDescriptor) - } else { - enum, _ = obj.(*EnumDescriptor) - } - if enum == nil { - log.Printf("don't know how to generate constant for %s", fieldname) - continue - } - def = g.DefaultPackageName(obj) + enum.prefix() + def - } - g.P(kind, fieldname, " ", typename, " = ", def) - g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""}) - } - g.P() -} - -// generateInternalStructFields just adds the XXX_ fields to the message struct. -func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) { - g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals - if len(mc.message.ExtensionRange) > 0 { - messageset := "" - if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() { - messageset = "protobuf_messageset:\"1\" " - } - g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`") - } - g.P("XXX_unrecognized\t[]byte `json:\"-\"`") - g.P("XXX_sizecache\tint32 `json:\"-\"`") - -} - -// generateOneofFuncs adds all the utility functions for oneof, including marshalling, unmarshalling and sizer. -func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) { - ofields := []*oneofField{} - for _, f := range topLevelFields { - if o, ok := f.(*oneofField); ok { - ofields = append(ofields, o) - } - } - if len(ofields) == 0 { - return - } - - // OneofFuncs - g.P("// XXX_OneofWrappers is for the internal use of the proto package.") - g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {") - g.P("return []interface{}{") - for _, of := range ofields { - for _, sf := range of.subFields { - sf.typedNil(g) - } - } - g.P("}") - g.P("}") - g.P() -} - -// generateMessageStruct adds the actual struct with it's members (but not methods) to the output. -func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) { - comments := g.PrintComments(mc.message.path) - - // Guarantee deprecation comments appear after user-provided comments. - if mc.message.GetOptions().GetDeprecated() { - if comments { - // Convention: Separate deprecation comments from original - // comments with an empty line. - g.P("//") - } - g.P(deprecationComment) - } - - g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {") - for _, pf := range topLevelFields { - pf.decl(g, mc) - } - g.generateInternalStructFields(mc, topLevelFields) - g.P("}") -} - -// generateGetters adds getters for all fields, including oneofs and weak fields when applicable. 
-func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) { - for _, pf := range topLevelFields { - pf.getter(g, mc) - } -} - -// generateSetters add setters for all fields, including oneofs and weak fields when applicable. -func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) { - for _, pf := range topLevelFields { - pf.setter(g, mc) - } -} - -// generateCommonMethods adds methods to the message that are not on a per field basis. -func (g *Generator) generateCommonMethods(mc *msgCtx) { - // Reset, String and ProtoMessage methods. - g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }") - g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") - g.P("func (*", mc.goName, ") ProtoMessage() {}") - var indexes []string - for m := mc.message; m != nil; m = m.parent { - indexes = append([]string{strconv.Itoa(m.index)}, indexes...) - } - g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {") - g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") - g.P("}") - g.P() - // TODO: Revisit the decision to use a XXX_WellKnownType method - // if we change proto.MessageName to work with multiple equivalents. - if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] { - g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`) - g.P() - } - - // Extension support methods - if len(mc.message.ExtensionRange) > 0 { - g.P() - g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{") - for _, r := range mc.message.ExtensionRange { - end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends - g.P("{Start: ", r.Start, ", End: ", end, "},") - } - g.P("}") - g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") - g.P("return extRange_", mc.goName) - g.P("}") - g.P() - } - - // TODO: It does not scale to keep adding another method for every - // operation on protos that we want to switch over to using the - // table-driven approach. Instead, we should only add a single method - // that allows getting access to the *InternalMessageInfo struct and then - // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that. - - // Wrapper for table-driven marshaling and unmarshaling. - g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {") - g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {") - g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {") - g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message - g.P("return xxx_messageInfo_", mc.goName, ".Size(m)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {") - g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)") - g.P("}") - - g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo") - g.P() -} - -// Generate the type, methods and default constant definitions for this Descriptor. 
-func (g *Generator) generateMessage(message *Descriptor) { - topLevelFields := []topLevelField{} - oFields := make(map[int32]*oneofField) - // The full type name - typeName := message.TypeName() - // The full type name, CamelCased. - goTypeName := CamelCaseSlice(typeName) - - usedNames := make(map[string]bool) - for _, n := range methodNames { - usedNames[n] = true - } - - // allocNames finds a conflict-free variation of the given strings, - // consistently mutating their suffixes. - // It returns the same number of strings. - allocNames := func(ns ...string) []string { - Loop: - for { - for _, n := range ns { - if usedNames[n] { - for i := range ns { - ns[i] += "_" - } - continue Loop - } - } - for _, n := range ns { - usedNames[n] = true - } - return ns - } - } - - mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later - - // Build a structure more suitable for generating the text in one pass - for i, field := range message.Field { - // Allocate the getter and the field at the same time so name - // collisions create field/method consistent names. - // TODO: This allocation occurs based on the order of the fields - // in the proto file, meaning that a change in the field - // ordering can change generated Method/Field names. - base := CamelCase(*field.Name) - ns := allocNames(base, "Get"+base) - fieldName, fieldGetterName := ns[0], ns[1] - typename, wiretype := g.GoType(message, field) - jsonName := *field.Name - tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty") - - oneof := field.OneofIndex != nil - if oneof && oFields[*field.OneofIndex] == nil { - odp := message.OneofDecl[int(*field.OneofIndex)] - base := CamelCase(odp.GetName()) - fname := allocNames(base)[0] - - // This is the first field of a oneof we haven't seen before. - // Generate the union field. - oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex) - c, ok := g.makeComments(oneofFullPath) - if ok { - c += "\n//\n" - } - c += "// Types that are valid to be assigned to " + fname + ":\n" - // Generate the rest of this comment later, - // when we've computed any disambiguation. - - dname := "is" + goTypeName + "_" + fname - tag := `protobuf_oneof:"` + odp.GetName() + `"` - of := oneofField{ - fieldCommon: fieldCommon{ - goName: fname, - getterName: "Get"+fname, - goType: dname, - tags: tag, - protoName: odp.GetName(), - fullPath: oneofFullPath, - }, - comment: c, - } - topLevelFields = append(topLevelFields, &of) - oFields[*field.OneofIndex] = &of - } - - if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { - desc := g.ObjectNamed(field.GetTypeName()) - if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { - // Figure out the Go types and tags for the key and value types. - keyField, valField := d.Field[0], d.Field[1] - keyType, keyWire := g.GoType(d, keyField) - valType, valWire := g.GoType(d, valField) - keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire) - - // We don't use stars, except for message-typed values. - // Message and enum types are the only two possibly foreign types used in maps, - // so record their use. They are not permitted as map keys. 
- keyType = strings.TrimPrefix(keyType, "*") - switch *valField.Type { - case descriptor.FieldDescriptorProto_TYPE_ENUM: - valType = strings.TrimPrefix(valType, "*") - g.RecordTypeUse(valField.GetTypeName()) - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - g.RecordTypeUse(valField.GetTypeName()) - default: - valType = strings.TrimPrefix(valType, "*") - } - - typename = fmt.Sprintf("map[%s]%s", keyType, valType) - mapFieldTypes[field] = typename // record for the getter generation - - tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag) - } - } - - fieldDeprecated := "" - if field.GetOptions().GetDeprecated() { - fieldDeprecated = deprecationComment - } - - dvalue := g.getterDefault(field, goTypeName) - if oneof { - tname := goTypeName + "_" + fieldName - // It is possible for this to collide with a message or enum - // nested in this message. Check for collisions. - for { - ok := true - for _, desc := range message.nested { - if CamelCaseSlice(desc.TypeName()) == tname { - ok = false - break - } - } - for _, enum := range message.enums { - if CamelCaseSlice(enum.TypeName()) == tname { - ok = false - break - } - } - if !ok { - tname += "_" - continue - } - break - } - - oneofField := oFields[*field.OneofIndex] - tag := "protobuf:" + g.goTag(message, field, wiretype) - sf := oneofSubField{ - fieldCommon: fieldCommon{ - goName: fieldName, - getterName: fieldGetterName, - goType: typename, - tags: tag, - protoName: field.GetName(), - fullPath: fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i), - }, - protoTypeName: field.GetTypeName(), - fieldNumber: int(*field.Number), - protoType: *field.Type, - getterDef: dvalue, - protoDef: field.GetDefaultValue(), - oneofTypeName: tname, - deprecated: fieldDeprecated, - } - oneofField.subFields = append(oneofField.subFields, &sf) - g.RecordTypeUse(field.GetTypeName()) - continue - } - - fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) - c, ok := g.makeComments(fieldFullPath) - if ok { - c += "\n" - } - rf := simpleField{ - fieldCommon: fieldCommon{ - goName: fieldName, - getterName: fieldGetterName, - goType: typename, - tags: tag, - protoName: field.GetName(), - fullPath: fieldFullPath, - }, - protoTypeName: field.GetTypeName(), - protoType: *field.Type, - deprecated: fieldDeprecated, - getterDef: dvalue, - protoDef: field.GetDefaultValue(), - comment: c, - } - var pf topLevelField = &rf - - topLevelFields = append(topLevelFields, pf) - g.RecordTypeUse(field.GetTypeName()) - } - - mc := &msgCtx{ - goName: goTypeName, - message: message, - } - - g.generateMessageStruct(mc, topLevelFields) - g.P() - g.generateCommonMethods(mc) - g.P() - g.generateDefaultConstants(mc, topLevelFields) - g.P() - g.generateGetters(mc, topLevelFields) - g.P() - g.generateSetters(mc, topLevelFields) - g.P() - g.generateOneofFuncs(mc, topLevelFields) - g.P() - - var oneofTypes []string - for _, f := range topLevelFields { - if of, ok := f.(*oneofField); ok { - for _, osf := range of.subFields { - oneofTypes = append(oneofTypes, osf.oneofTypeName) - } - } - } - - opts := message.Options - ms := &messageSymbol{ - sym: goTypeName, - hasExtensions: len(message.ExtensionRange) > 0, - isMessageSet: opts != nil && opts.GetMessageSetWireFormat(), - oneofTypes: oneofTypes, - } - g.file.addExport(message, ms) - - for _, ext := range message.ext { - g.generateExtension(ext) - } - - fullName := strings.Join(message.TypeName(), ".") - if g.file.Package != nil { - fullName = *g.file.Package + "." 
+ fullName - } - - g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName) - // Register types for native map types. - for _, k := range mapFieldKeys(mapFieldTypes) { - fullName := strings.TrimPrefix(*k.TypeName, ".") - g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName) - } - -} - -type byTypeName []*descriptor.FieldDescriptorProto - -func (a byTypeName) Len() int { return len(a) } -func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName } - -// mapFieldKeys returns the keys of m in a consistent order. -func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto { - keys := make([]*descriptor.FieldDescriptorProto, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Sort(byTypeName(keys)) - return keys -} - -var escapeChars = [256]byte{ - 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?', -} - -// unescape reverses the "C" escaping that protoc does for default values of bytes fields. -// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape -// sequences are conveyed, unmodified, into the decoded result. -func unescape(s string) string { - // NB: Sadly, we can't use strconv.Unquote because protoc will escape both - // single and double quotes, but strconv.Unquote only allows one or the - // other (based on actual surrounding quotes of its input argument). - - var out []byte - for len(s) > 0 { - // regular character, or too short to be valid escape - if s[0] != '\\' || len(s) < 2 { - out = append(out, s[0]) - s = s[1:] - } else if c := escapeChars[s[1]]; c != 0 { - // escape sequence - out = append(out, c) - s = s[2:] - } else if s[1] == 'x' || s[1] == 'X' { - // hex escape, e.g. "\x80 - if len(s) < 4 { - // too short to be valid - out = append(out, s[:2]...) - s = s[2:] - continue - } - v, err := strconv.ParseUint(s[2:4], 16, 8) - if err != nil { - out = append(out, s[:4]...) - } else { - out = append(out, byte(v)) - } - s = s[4:] - } else if '0' <= s[1] && s[1] <= '7' { - // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164" - // so consume up to 2 more bytes or up to end-of-string - n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567")) - if n > 3 { - n = 3 - } - v, err := strconv.ParseUint(s[1:1+n], 8, 8) - if err != nil { - out = append(out, s[:1+n]...) - } else { - out = append(out, byte(v)) - } - s = s[1+n:] - } else { - // bad escape, just propagate the slash as-is - out = append(out, s[0]) - s = s[1:] - } - } - - return string(out) -} - -func (g *Generator) generateExtension(ext *ExtensionDescriptor) { - ccTypeName := ext.DescName() - - extObj := g.ObjectNamed(*ext.Extendee) - var extDesc *Descriptor - if id, ok := extObj.(*ImportedDescriptor); ok { - // This is extending a publicly imported message. - // We need the underlying type for goTag. 
- extDesc = id.o.(*Descriptor) - } else { - extDesc = extObj.(*Descriptor) - } - extendedType := "*" + g.TypeName(extObj) // always use the original - field := ext.FieldDescriptorProto - fieldType, wireType := g.GoType(ext.parent, field) - tag := g.goTag(extDesc, field, wireType) - g.RecordTypeUse(*ext.Extendee) - if n := ext.FieldDescriptorProto.TypeName; n != nil { - // foreign extension type - g.RecordTypeUse(*n) - } - - typeName := ext.TypeName() - - // Special case for proto2 message sets: If this extension is extending - // proto2.bridge.MessageSet, and its final name component is "message_set_extension", - // then drop that last component. - // - // TODO: This should be implemented in the text formatter rather than the generator. - // In addition, the situation for when to apply this special case is implemented - // differently in other languages: - // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560 - if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" { - typeName = typeName[:len(typeName)-1] - } - - // For text formatting, the package must be exactly what the .proto file declares, - // ignoring overrides such as the go_package option, and with no dot/underscore mapping. - extName := strings.Join(typeName, ".") - if g.file.Package != nil { - extName = *g.file.Package + "." + extName - } - - g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") - g.P("ExtendedType: (", extendedType, ")(nil),") - g.P("ExtensionType: (", fieldType, ")(nil),") - g.P("Field: ", field.Number, ",") - g.P(`Name: "`, extName, `",`) - g.P("Tag: ", tag, ",") - g.P(`Filename: "`, g.file.GetName(), `",`) - - g.P("}") - g.P() - - g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) - - g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) -} - -func (g *Generator) generateInitFunction() { - if len(g.init) == 0 { - return - } - g.P("func init() {") - for _, l := range g.init { - g.P(l) - } - g.P("}") - g.init = nil -} - -func (g *Generator) generateFileDescriptor(file *FileDescriptor) { - // Make a copy and trim source_code_info data. - // TODO: Trim this more when we know exactly what we need. - pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto) - pb.SourceCodeInfo = nil - - b, err := proto.Marshal(pb) - if err != nil { - g.Fail(err.Error()) - } - - var buf bytes.Buffer - w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) - w.Write(b) - w.Close() - b = buf.Bytes() - - v := file.VarName() - g.P() - g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") - g.P("var ", v, " = []byte{") - g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto") - for len(b) > 0 { - n := 16 - if n > len(b) { - n = len(b) - } - - s := "" - for _, c := range b[:n] { - s += fmt.Sprintf("0x%02x,", c) - } - g.P(s) - - b = b[n:] - } - g.P("}") -} - -func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { - // // We always print the full (proto-world) package name here. - pkg := enum.File().GetPackage() - if pkg != "" { - pkg += "." - } - // The full type name - typeName := enum.TypeName() - // The full type name, CamelCased. - ccTypeName := CamelCaseSlice(typeName) - g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName) -} - -// And now lots of helper functions. - -// Is c an ASCII lower-case letter? 
-func isASCIILower(c byte) bool { - return 'a' <= c && c <= 'z' -} - -// Is c an ASCII digit? -func isASCIIDigit(c byte) bool { - return '0' <= c && c <= '9' -} - -// CamelCase returns the CamelCased name. -// If there is an interior underscore followed by a lower case letter, -// drop the underscore and convert the letter to upper case. -// There is a remote possibility of this rewrite causing a name collision, -// but it's so remote we're prepared to pretend it's nonexistent - since the -// C++ generator lowercases names, it's extremely unlikely to have two fields -// with different capitalizations. -// In short, _my_field_name_2 becomes XMyFieldName_2. -func CamelCase(s string) string { - if s == "" { - return "" - } - t := make([]byte, 0, 32) - i := 0 - if s[0] == '_' { - // Need a capital letter; drop the '_'. - t = append(t, 'X') - i++ - } - // Invariant: if the next letter is lower case, it must be converted - // to upper case. - // That is, we process a word at a time, where words are marked by _ or - // upper case letter. Digits are treated as words. - for ; i < len(s); i++ { - c := s[i] - if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { - continue // Skip the underscore in s. - } - if isASCIIDigit(c) { - t = append(t, c) - continue - } - // Assume we have a letter now - if not, it's a bogus identifier. - // The next word is a sequence of characters that must start upper case. - if isASCIILower(c) { - c ^= ' ' // Make it a capital letter. - } - t = append(t, c) // Guaranteed not lower case. - // Accept lower case sequence that follows. - for i+1 < len(s) && isASCIILower(s[i+1]) { - i++ - t = append(t, s[i]) - } - } - return string(t) -} - -// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to -// be joined with "_". -func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } - -// dottedSlice turns a sliced name into a dotted name. -func dottedSlice(elem []string) string { return strings.Join(elem, ".") } - -// Is this field optional? -func isOptional(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL -} - -// Is this field required? -func isRequired(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED -} - -// Is this field repeated? -func isRepeated(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED -} - -// Is this field a scalar numeric type? 
-func isScalar(field *descriptor.FieldDescriptorProto) bool { - if field.Type == nil { - return false - } - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE, - descriptor.FieldDescriptorProto_TYPE_FLOAT, - descriptor.FieldDescriptorProto_TYPE_INT64, - descriptor.FieldDescriptorProto_TYPE_UINT64, - descriptor.FieldDescriptorProto_TYPE_INT32, - descriptor.FieldDescriptorProto_TYPE_FIXED64, - descriptor.FieldDescriptorProto_TYPE_FIXED32, - descriptor.FieldDescriptorProto_TYPE_BOOL, - descriptor.FieldDescriptorProto_TYPE_UINT32, - descriptor.FieldDescriptorProto_TYPE_ENUM, - descriptor.FieldDescriptorProto_TYPE_SFIXED32, - descriptor.FieldDescriptorProto_TYPE_SFIXED64, - descriptor.FieldDescriptorProto_TYPE_SINT32, - descriptor.FieldDescriptorProto_TYPE_SINT64: - return true - default: - return false - } -} - -// badToUnderscore is the mapping function used to generate Go names from package names, -// which can be dotted in the input .proto file. It replaces non-identifier characters such as -// dot or dash with underscore. -func badToUnderscore(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { - return r - } - return '_' -} - -// baseName returns the last path element of the name, with the last dotted suffix removed. -func baseName(name string) string { - // First, find the last element - if i := strings.LastIndex(name, "/"); i >= 0 { - name = name[i+1:] - } - // Now drop the suffix - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[0:i] - } - return name -} - -// The SourceCodeInfo message describes the location of elements of a parsed -// .proto file by way of a "path", which is a sequence of integers that -// describe the route from a FileDescriptorProto to the relevant submessage. -// The path alternates between a field number of a repeated field, and an index -// into that repeated field. The constants below define the field numbers that -// are used. -// -// See descriptor.proto for more information about this. -const ( - // tag numbers in FileDescriptorProto - packagePath = 2 // package - messagePath = 4 // message_type - enumPath = 5 // enum_type - // tag numbers in DescriptorProto - messageFieldPath = 2 // field - messageMessagePath = 3 // nested_type - messageEnumPath = 4 // enum_type - messageOneofPath = 8 // oneof_decl - // tag numbers in EnumDescriptorProto - enumValuePath = 2 // value -) - -var supportTypeAliases bool - -func init() { - for _, tag := range build.Default.ReleaseTags { - if tag == "go1.9" { - supportTypeAliases = true - return - } - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go deleted file mode 100644 index a9b61036cc..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go +++ /dev/null @@ -1,117 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package remap handles tracking the locations of Go tokens in a source text -across a rewrite by the Go formatter. -*/ -package remap - -import ( - "fmt" - "go/scanner" - "go/token" -) - -// A Location represents a span of byte offsets in the source text. -type Location struct { - Pos, End int // End is exclusive -} - -// A Map represents a mapping between token locations in an input source text -// and locations in the correspnding output text. -type Map map[Location]Location - -// Find reports whether the specified span is recorded by m, and if so returns -// the new location it was mapped to. If the input span was not found, the -// returned location is the same as the input. -func (m Map) Find(pos, end int) (Location, bool) { - key := Location{ - Pos: pos, - End: end, - } - if loc, ok := m[key]; ok { - return loc, true - } - return key, false -} - -func (m Map) add(opos, oend, npos, nend int) { - m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend} -} - -// Compute constructs a location mapping from input to output. An error is -// reported if any of the tokens of output cannot be mapped. -func Compute(input, output []byte) (Map, error) { - itok := tokenize(input) - otok := tokenize(output) - if len(itok) != len(otok) { - return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok)) - } - m := make(Map) - for i, ti := range itok { - to := otok[i] - if ti.Token != to.Token { - return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to) - } - m.add(ti.pos, ti.end, to.pos, to.end) - } - return m, nil -} - -// tokinfo records the span and type of a source token. 
-type tokinfo struct { - pos, end int - token.Token -} - -func tokenize(src []byte) []tokinfo { - fs := token.NewFileSet() - var s scanner.Scanner - s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) - var info []tokinfo - for { - pos, next, lit := s.Scan() - switch next { - case token.SEMICOLON: - continue - } - info = append(info, tokinfo{ - pos: int(pos - 1), - end: int(pos + token.Pos(len(lit)) - 1), - Token: next, - }) - if next == token.EOF { - break - } - } - return info -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go deleted file mode 100644 index 61bfc10e02..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go +++ /dev/null @@ -1,369 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/compiler/plugin.proto - -/* -Package plugin_go is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/compiler/plugin.proto - -It has these top-level messages: - Version - CodeGeneratorRequest - CodeGeneratorResponse -*/ -package plugin_go - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The version number of protocol compiler. -type Version struct { - Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. 
- Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (m *Version) Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) -} -func (dst *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(dst, src) -} -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *Version) GetMajor() int32 { - if m != nil && m.Major != nil { - return *m.Major - } - return 0 -} - -func (m *Version) GetMinor() int32 { - if m != nil && m.Minor != nil { - return *m.Minor - } - return 0 -} - -func (m *Version) GetPatch() int32 { - if m != nil && m.Patch != nil { - return *m.Patch - } - return 0 -} - -func (m *Version) GetSuffix() string { - if m != nil && m.Suffix != nil { - return *m.Suffix - } - return "" -} - -// An encoded CodeGeneratorRequest is written to the plugin's stdin. -type CodeGeneratorRequest struct { - // The .proto files that were explicitly listed on the command-line. The - // code generator should generate code only for these files. Each file's - // descriptor will be included in proto_file, below. - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` - // The generator parameter passed on the command-line. - Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` - // FileDescriptorProtos for all files in files_to_generate and everything - // they import. The files will appear in topological order, so each file - // appears before any file that imports it. - // - // protoc guarantees that all proto_files will be written after - // the fields above, even though this is not technically guaranteed by the - // protobuf wire format. This theoretically could allow a plugin to stream - // in the FileDescriptorProtos and handle them one by one rather than read - // the entire set into memory at once. However, as of this writing, this - // is not similarly optimized on protoc's end -- it will store all fields in - // memory at once before sending them to the plugin. - // - // Type names of fields and extensions in the FileDescriptorProto are always - // fully qualified. - ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` - // The version number of protocol compiler. 
- CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } -func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorRequest) ProtoMessage() {} -func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (m *CodeGeneratorRequest) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) -} -func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src) -} -func (m *CodeGeneratorRequest) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorRequest.Size(m) -} -func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo - -func (m *CodeGeneratorRequest) GetFileToGenerate() []string { - if m != nil { - return m.FileToGenerate - } - return nil -} - -func (m *CodeGeneratorRequest) GetParameter() string { - if m != nil && m.Parameter != nil { - return *m.Parameter - } - return "" -} - -func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { - if m != nil { - return m.ProtoFile - } - return nil -} - -func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { - if m != nil { - return m.CompilerVersion - } - return nil -} - -// The plugin writes an encoded CodeGeneratorResponse to stdout. -type CodeGeneratorResponse struct { - // Error message. If non-empty, code generation failed. The plugin process - // should exit with status code zero even if it reports an error in this way. - // - // This should be used to indicate errors in .proto files which prevent the - // code generator from generating correct code. Errors which indicate a - // problem in protoc itself -- such as the input CodeGeneratorRequest being - // unparseable -- should be reported by writing a message to stderr and - // exiting with a non-zero status code. 
- Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } -func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse) ProtoMessage() {} -func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -func (m *CodeGeneratorResponse) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) -} -func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src) -} -func (m *CodeGeneratorResponse) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorResponse.Size(m) -} -func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo - -func (m *CodeGeneratorResponse) GetError() string { - if m != nil && m.Error != nil { - return *m.Error - } - return "" -} - -func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { - if m != nil { - return m.File - } - return nil -} - -// Represents a single generated file. -type CodeGeneratorResponse_File struct { - // The file name, relative to the output directory. The name must not - // contain "." or ".." components and must be relative, not be absolute (so, - // the file cannot lie outside the output directory). "/" must be used as - // the path separator, not "\". - // - // If the name is omitted, the content will be appended to the previous - // file. This allows the generator to break large files into small chunks, - // and allows the generated text to be streamed back to protoc so that large - // files need not reside completely in memory at one time. Note that as of - // this writing protoc does not optimize for this -- it will read the entire - // CodeGeneratorResponse before writing files to disk. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // If non-empty, indicates that the named file should already exist, and the - // content here is to be inserted into that file at a defined insertion - // point. This feature allows a code generator to extend the output - // produced by another code generator. The original generator may provide - // insertion points by placing special annotations in the file that look - // like: - // @@protoc_insertion_point(NAME) - // The annotation can have arbitrary text before and after it on the line, - // which allows it to be placed in a comment. NAME should be replaced with - // an identifier naming the point -- this is what other generators will use - // as the insertion_point. Code inserted at this point will be placed - // immediately above the line containing the insertion point (thus multiple - // insertions to the same point will come out in the order they were added). - // The double-@ is intended to make it unlikely that the generated code - // could contain things that look like insertion points by accident. 
- // - // For example, the C++ code generator places the following line in the - // .pb.h files that it generates: - // // @@protoc_insertion_point(namespace_scope) - // This line appears within the scope of the file's package namespace, but - // outside of any particular class. Another plugin can then specify the - // insertion_point "namespace_scope" to generate additional classes or - // other declarations that should be placed in this scope. - // - // Note that if the line containing the insertion point begins with - // whitespace, the same whitespace will be added to every line of the - // inserted text. This is useful for languages like Python, where - // indentation matters. In these languages, the insertion point comment - // should be indented the same amount as any inserted code will need to be - // in order to work correctly in that context. - // - // The code generator that generates the initial file and the one which - // inserts into it must both run as part of a single invocation of protoc. - // Code generators are executed in the order in which they appear on the - // command line. - // - // If |insertion_point| is present, |name| must also be present. - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` - // The file contents. - Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } -func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse_File) ProtoMessage() {} -func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } -func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) -} -func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src) -} -func (m *CodeGeneratorResponse_File) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) -} -func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo - -func (m *CodeGeneratorResponse_File) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { - if m != nil && m.InsertionPoint != nil { - return *m.InsertionPoint - } - return "" -} - -func (m *CodeGeneratorResponse_File) GetContent() string { - if m != nil && m.Content != nil { - return *m.Content - } - return "" -} - -func init() { - proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") - proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") - proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") - proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") -} - -func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", 
fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 417 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, - 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, - 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, - 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, - 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, - 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, - 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, - 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, - 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, - 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, - 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, - 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, - 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, - 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, - 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, - 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, - 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda, - 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, - 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, - 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, - 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, - 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, - 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, - 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, - 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, - 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, - 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden deleted file mode 100644 index 8953d0ff82..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google/protobuf/compiler/plugin.proto -// DO NOT EDIT! - -package google_protobuf_compiler - -import proto "github.com/golang/protobuf/proto" -import "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference proto and math imports to suppress error if they are not otherwise used. 
-var _ = proto.GetString -var _ = math.Inf - -type CodeGeneratorRequest struct { - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` - Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` - ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} } -func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorRequest) ProtoMessage() {} - -func (this *CodeGeneratorRequest) GetParameter() string { - if this != nil && this.Parameter != nil { - return *this.Parameter - } - return "" -} - -type CodeGeneratorResponse struct { - Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} } -func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorResponse) ProtoMessage() {} - -func (this *CodeGeneratorResponse) GetError() string { - if this != nil && this.Error != nil { - return *this.Error - } - return "" -} - -type CodeGeneratorResponse_File struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` - Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} } -func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorResponse_File) ProtoMessage() {} - -func (this *CodeGeneratorResponse_File) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *CodeGeneratorResponse_File) GetInsertionPoint() string { - if this != nil && this.InsertionPoint != nil { - return *this.InsertionPoint - } - return "" -} - -func (this *CodeGeneratorResponse_File) GetContent() string { - if this != nil && this.Content != nil { - return *this.Content - } - return "" -} - -func init() { -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto deleted file mode 100644 index 5b5574529e..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto +++ /dev/null @@ -1,167 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// -// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to -// change. -// -// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is -// just a program that reads a CodeGeneratorRequest from stdin and writes a -// CodeGeneratorResponse to stdout. -// -// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead -// of dealing with the raw protocol defined here. -// -// A plugin executable needs only to be placed somewhere in the path. The -// plugin should be named "protoc-gen-$NAME", and will then be used when the -// flag "--${NAME}_out" is passed to protoc. - -syntax = "proto2"; -package google.protobuf.compiler; -option java_package = "com.google.protobuf.compiler"; -option java_outer_classname = "PluginProtos"; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; - -import "google/protobuf/descriptor.proto"; - -// The version number of protocol compiler. -message Version { - optional int32 major = 1; - optional int32 minor = 2; - optional int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - optional string suffix = 4; -} - -// An encoded CodeGeneratorRequest is written to the plugin's stdin. -message CodeGeneratorRequest { - // The .proto files that were explicitly listed on the command-line. The - // code generator should generate code only for these files. Each file's - // descriptor will be included in proto_file, below. - repeated string file_to_generate = 1; - - // The generator parameter passed on the command-line. - optional string parameter = 2; - - // FileDescriptorProtos for all files in files_to_generate and everything - // they import. The files will appear in topological order, so each file - // appears before any file that imports it. - // - // protoc guarantees that all proto_files will be written after - // the fields above, even though this is not technically guaranteed by the - // protobuf wire format. This theoretically could allow a plugin to stream - // in the FileDescriptorProtos and handle them one by one rather than read - // the entire set into memory at once. However, as of this writing, this - // is not similarly optimized on protoc's end -- it will store all fields in - // memory at once before sending them to the plugin. - // - // Type names of fields and extensions in the FileDescriptorProto are always - // fully qualified. 
- repeated FileDescriptorProto proto_file = 15; - - // The version number of protocol compiler. - optional Version compiler_version = 3; - -} - -// The plugin writes an encoded CodeGeneratorResponse to stdout. -message CodeGeneratorResponse { - // Error message. If non-empty, code generation failed. The plugin process - // should exit with status code zero even if it reports an error in this way. - // - // This should be used to indicate errors in .proto files which prevent the - // code generator from generating correct code. Errors which indicate a - // problem in protoc itself -- such as the input CodeGeneratorRequest being - // unparseable -- should be reported by writing a message to stderr and - // exiting with a non-zero status code. - optional string error = 1; - - // Represents a single generated file. - message File { - // The file name, relative to the output directory. The name must not - // contain "." or ".." components and must be relative, not be absolute (so, - // the file cannot lie outside the output directory). "/" must be used as - // the path separator, not "\". - // - // If the name is omitted, the content will be appended to the previous - // file. This allows the generator to break large files into small chunks, - // and allows the generated text to be streamed back to protoc so that large - // files need not reside completely in memory at one time. Note that as of - // this writing protoc does not optimize for this -- it will read the entire - // CodeGeneratorResponse before writing files to disk. - optional string name = 1; - - // If non-empty, indicates that the named file should already exist, and the - // content here is to be inserted into that file at a defined insertion - // point. This feature allows a code generator to extend the output - // produced by another code generator. The original generator may provide - // insertion points by placing special annotations in the file that look - // like: - // @@protoc_insertion_point(NAME) - // The annotation can have arbitrary text before and after it on the line, - // which allows it to be placed in a comment. NAME should be replaced with - // an identifier naming the point -- this is what other generators will use - // as the insertion_point. Code inserted at this point will be placed - // immediately above the line containing the insertion point (thus multiple - // insertions to the same point will come out in the order they were added). - // The double-@ is intended to make it unlikely that the generated code - // could contain things that look like insertion points by accident. - // - // For example, the C++ code generator places the following line in the - // .pb.h files that it generates: - // // @@protoc_insertion_point(namespace_scope) - // This line appears within the scope of the file's package namespace, but - // outside of any particular class. Another plugin can then specify the - // insertion_point "namespace_scope" to generate additional classes or - // other declarations that should be placed in this scope. - // - // Note that if the line containing the insertion point begins with - // whitespace, the same whitespace will be added to every line of the - // inserted text. This is useful for languages like Python, where - // indentation matters. In these languages, the insertion point comment - // should be indented the same amount as any inserted code will need to be - // in order to work correctly in that context. 
- // - // The code generator that generates the initial file and the one which - // inserts into it must both run as part of a single invocation of protoc. - // Code generators are executed in the order in which they appear on the - // command line. - // - // If |insertion_point| is present, |name| must also be present. - optional string insertion_point = 2; - - // The file contents. - optional string content = 15; - } - repeated File file = 15; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index 78ee523349..e3c56d3ffa 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,13 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto -package any +package any // import "github.com/golang/protobuf/ptypes/any" -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -18,7 +16,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // `Any` contains an arbitrary serialized protocol buffer message along with a // URL that describes the type of the serialized message. @@ -101,18 +99,17 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // } // type Any struct { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: // // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the @@ -121,10 +118,6 @@ type Any struct { // on changes to types. (Use versioned type names to manage // breaking changes.) // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. 
- // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // @@ -140,19 +133,17 @@ func (m *Any) Reset() { *m = Any{} } func (m *Any) String() string { return proto.CompactTextString(m) } func (*Any) ProtoMessage() {} func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_b53526c13ae22eb4, []int{0} + return fileDescriptor_any_744b9ca530f228db, []int{0} } - func (*Any) XXX_WellKnownType() string { return "Any" } - func (m *Any) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Any.Unmarshal(m, b) } func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Any.Marshal(b, m, deterministic) } -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) } func (m *Any) XXX_Size() int { return xxx_messageInfo_Any.Size(m) @@ -181,9 +172,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } -var fileDescriptor_b53526c13ae22eb4 = []byte{ +var fileDescriptor_any_744b9ca530f228db = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto index 4932942558..c748667623 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -120,18 +120,17 @@ option objc_class_prefix = "GPB"; // } // message Any { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: // // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the @@ -140,10 +139,6 @@ message Any { // on changes to types. (Use versioned type names to manage // breaking changes.) 
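An illustrative aside, not part of the vendored diff: the `Any` comments reworded in the hunks above describe type-URL packing. A minimal sketch of packing a message into and out of an Any with the ptypes helpers that ship alongside these vendored files, using a Duration message as a stand-in payload; variable names are hypothetical.

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack: MarshalAny fills in the default "type.googleapis.com/..." type URL.
	src := &durpb.Duration{Seconds: 3, Nanos: 500000000}
	a, err := ptypes.MarshalAny(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Unpack into a concrete message of the expected type.
	dst := &durpb.Duration{}
	if err := ptypes.UnmarshalAny(a, dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.Seconds, dst.Nanos)
}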
// - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 26d1ca2fb5..65cb0f8eb5 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -82,7 +82,7 @@ func Duration(p *durpb.Duration) (time.Duration, error) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } if p.Nanos != 0 { - d += time.Duration(p.Nanos) * time.Nanosecond + d += time.Duration(p.Nanos) if (d < 0) != (p.Nanos < 0) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 0d681ee21a..a7beb2c414 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,13 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto -package duration +package duration // import "github.com/golang/protobuf/ptypes/duration" -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -18,7 +16,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
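An illustrative aside, not part of the vendored diff: the ptypes/duration.go hunk above drops the `* time.Nanosecond` factor. Since time.Nanosecond is Duration(1), the two forms are equivalent; a minimal sketch of the seconds-plus-nanos conversion under that assumption, with hypothetical names secs and nanos.

package main

import (
	"fmt"
	"time"
)

// toDuration mirrors the conversion ptypes.Duration performs for in-range values.
func toDuration(secs int64, nanos int32) time.Duration {
	d := time.Duration(secs) * time.Second
	d += time.Duration(nanos) // same value as time.Duration(nanos) * time.Nanosecond
	return d
}

func main() {
	fmt.Println(toDuration(3, 500000000)) // prints 3.5s
}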
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // A Duration represents a signed, fixed-length span of time represented // as a count of seconds and fractions of seconds at nanosecond @@ -101,19 +99,17 @@ func (m *Duration) Reset() { *m = Duration{} } func (m *Duration) String() string { return proto.CompactTextString(m) } func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_23597b2ebd7ac6c5, []int{0} + return fileDescriptor_duration_e7d612259e3f0613, []int{0} } - func (*Duration) XXX_WellKnownType() string { return "Duration" } - func (m *Duration) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Duration.Unmarshal(m, b) } func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Duration.Marshal(b, m, deterministic) } -func (m *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(m, src) +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) } func (m *Duration) XXX_Size() int { return xxx_messageInfo_Duration.Size(m) @@ -142,9 +138,11 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } +func init() { + proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) +} -var fileDescriptor_23597b2ebd7ac6c5 = []byte{ +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go deleted file mode 100644 index 33daa73dd2..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ /dev/null @@ -1,336 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/struct.proto - -package structpb - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// `NullValue` is a singleton enumeration to represent the null value for the -// `Value` type union. -// -// The JSON representation for `NullValue` is JSON `null`. -type NullValue int32 - -const ( - // Null value. 
- NullValue_NULL_VALUE NullValue = 0 -) - -var NullValue_name = map[int32]string{ - 0: "NULL_VALUE", -} - -var NullValue_value = map[string]int32{ - "NULL_VALUE": 0, -} - -func (x NullValue) String() string { - return proto.EnumName(NullValue_name, int32(x)) -} - -func (NullValue) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_df322afd6c9fb402, []int{0} -} - -func (NullValue) XXX_WellKnownType() string { return "NullValue" } - -// `Struct` represents a structured data value, consisting of fields -// which map to dynamically typed values. In some languages, `Struct` -// might be supported by a native representation. For example, in -// scripting languages like JS a struct is represented as an -// object. The details of that representation are described together -// with the proto support for the language. -// -// The JSON representation for `Struct` is JSON object. -type Struct struct { - // Unordered map of dynamically typed values. - Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Struct) Reset() { *m = Struct{} } -func (m *Struct) String() string { return proto.CompactTextString(m) } -func (*Struct) ProtoMessage() {} -func (*Struct) Descriptor() ([]byte, []int) { - return fileDescriptor_df322afd6c9fb402, []int{0} -} - -func (*Struct) XXX_WellKnownType() string { return "Struct" } - -func (m *Struct) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Struct.Unmarshal(m, b) -} -func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Struct.Marshal(b, m, deterministic) -} -func (m *Struct) XXX_Merge(src proto.Message) { - xxx_messageInfo_Struct.Merge(m, src) -} -func (m *Struct) XXX_Size() int { - return xxx_messageInfo_Struct.Size(m) -} -func (m *Struct) XXX_DiscardUnknown() { - xxx_messageInfo_Struct.DiscardUnknown(m) -} - -var xxx_messageInfo_Struct proto.InternalMessageInfo - -func (m *Struct) GetFields() map[string]*Value { - if m != nil { - return m.Fields - } - return nil -} - -// `Value` represents a dynamically typed value which can be either -// null, a number, a string, a boolean, a recursive struct value, or a -// list of values. A producer of value is expected to set one of that -// variants, absence of any variant indicates an error. -// -// The JSON representation for `Value` is JSON value. -type Value struct { - // The kind of value. 
- // - // Types that are valid to be assigned to Kind: - // *Value_NullValue - // *Value_NumberValue - // *Value_StringValue - // *Value_BoolValue - // *Value_StructValue - // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Value) Reset() { *m = Value{} } -func (m *Value) String() string { return proto.CompactTextString(m) } -func (*Value) ProtoMessage() {} -func (*Value) Descriptor() ([]byte, []int) { - return fileDescriptor_df322afd6c9fb402, []int{1} -} - -func (*Value) XXX_WellKnownType() string { return "Value" } - -func (m *Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Value.Unmarshal(m, b) -} -func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Value.Marshal(b, m, deterministic) -} -func (m *Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Value.Merge(m, src) -} -func (m *Value) XXX_Size() int { - return xxx_messageInfo_Value.Size(m) -} -func (m *Value) XXX_DiscardUnknown() { - xxx_messageInfo_Value.DiscardUnknown(m) -} - -var xxx_messageInfo_Value proto.InternalMessageInfo - -type isValue_Kind interface { - isValue_Kind() -} - -type Value_NullValue struct { - NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` -} - -type Value_NumberValue struct { - NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` -} - -type Value_StringValue struct { - StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type Value_BoolValue struct { - BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type Value_StructValue struct { - StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` -} - -type Value_ListValue struct { - ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` -} - -func (*Value_NullValue) isValue_Kind() {} - -func (*Value_NumberValue) isValue_Kind() {} - -func (*Value_StringValue) isValue_Kind() {} - -func (*Value_BoolValue) isValue_Kind() {} - -func (*Value_StructValue) isValue_Kind() {} - -func (*Value_ListValue) isValue_Kind() {} - -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind - } - return nil -} - -func (m *Value) GetNullValue() NullValue { - if x, ok := m.GetKind().(*Value_NullValue); ok { - return x.NullValue - } - return NullValue_NULL_VALUE -} - -func (m *Value) GetNumberValue() float64 { - if x, ok := m.GetKind().(*Value_NumberValue); ok { - return x.NumberValue - } - return 0 -} - -func (m *Value) GetStringValue() string { - if x, ok := m.GetKind().(*Value_StringValue); ok { - return x.StringValue - } - return "" -} - -func (m *Value) GetBoolValue() bool { - if x, ok := m.GetKind().(*Value_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (m *Value) GetStructValue() *Struct { - if x, ok := m.GetKind().(*Value_StructValue); ok { - return x.StructValue - } - return nil -} - -func (m *Value) GetListValue() *ListValue { - if x, ok := m.GetKind().(*Value_ListValue); ok { - return x.ListValue - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*Value) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Value_NullValue)(nil), - (*Value_NumberValue)(nil), - (*Value_StringValue)(nil), - (*Value_BoolValue)(nil), - (*Value_StructValue)(nil), - (*Value_ListValue)(nil), - } -} - -// `ListValue` is a wrapper around a repeated field of values. -// -// The JSON representation for `ListValue` is JSON array. -type ListValue struct { - // Repeated field of dynamically typed values. - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListValue) Reset() { *m = ListValue{} } -func (m *ListValue) String() string { return proto.CompactTextString(m) } -func (*ListValue) ProtoMessage() {} -func (*ListValue) Descriptor() ([]byte, []int) { - return fileDescriptor_df322afd6c9fb402, []int{2} -} - -func (*ListValue) XXX_WellKnownType() string { return "ListValue" } - -func (m *ListValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListValue.Unmarshal(m, b) -} -func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) -} -func (m *ListValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListValue.Merge(m, src) -} -func (m *ListValue) XXX_Size() int { - return xxx_messageInfo_ListValue.Size(m) -} -func (m *ListValue) XXX_DiscardUnknown() { - xxx_messageInfo_ListValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ListValue proto.InternalMessageInfo - -func (m *ListValue) GetValues() []*Value { - if m != nil { - return m.Values - } - return nil -} - -func init() { - proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) - proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") - proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") - proto.RegisterType((*Value)(nil), "google.protobuf.Value") - proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") -} - -func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } - -var fileDescriptor_df322afd6c9fb402 = []byte{ - // 417 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, - 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, - 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, - 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, - 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, - 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, - 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, - 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, - 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, - 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, - 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, - 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, - 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, - 0x6c, 
0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, - 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, - 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, - 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, - 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, - 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, - 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, - 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, - 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, - 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, - 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, - 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, - 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, - 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto deleted file mode 100644 index 7d7808e7fb..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto +++ /dev/null @@ -1,96 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "StructProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - - -// `Struct` represents a structured data value, consisting of fields -// which map to dynamically typed values. In some languages, `Struct` -// might be supported by a native representation. For example, in -// scripting languages like JS a struct is represented as an -// object. The details of that representation are described together -// with the proto support for the language. -// -// The JSON representation for `Struct` is JSON object. -message Struct { - // Unordered map of dynamically typed values. - map fields = 1; -} - -// `Value` represents a dynamically typed value which can be either -// null, a number, a string, a boolean, a recursive struct value, or a -// list of values. A producer of value is expected to set one of that -// variants, absence of any variant indicates an error. -// -// The JSON representation for `Value` is JSON value. -message Value { - // The kind of value. - oneof kind { - // Represents a null value. - NullValue null_value = 1; - // Represents a double value. - double number_value = 2; - // Represents a string value. - string string_value = 3; - // Represents a boolean value. - bool bool_value = 4; - // Represents a structured value. - Struct struct_value = 5; - // Represents a repeated `Value`. - ListValue list_value = 6; - } -} - -// `NullValue` is a singleton enumeration to represent the null value for the -// `Value` type union. -// -// The JSON representation for `NullValue` is JSON `null`. -enum NullValue { - // Null value. - NULL_VALUE = 0; -} - -// `ListValue` is a wrapper around a repeated field of values. -// -// The JSON representation for `ListValue` is JSON array. -message ListValue { - // Repeated field of dynamically typed values. - repeated Value values = 1; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 8da0df01ac..47f10dbc2c 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -111,9 +111,11 @@ func TimestampNow() *tspb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) ts := &tspb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), + Seconds: seconds, + Nanos: nanos, } if err := validateTimestamp(ts); err != nil { return nil, err diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index 31cd846de9..8e76ae9763 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,13 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
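An illustrative aside, not part of the vendored diff: the ptypes/timestamp.go hunk above derives Nanos from t.Sub(time.Unix(seconds, 0)) rather than t.Nanosecond(). Both expressions should yield the same sub-second remainder in [0, 1e9) for any time.Time, so the change is expected to be behavior-preserving; a minimal sketch with a hypothetical helper name.

package main

import (
	"fmt"
	"time"
)

// splitUnix mirrors the seconds/nanos split used by TimestampProto.
func splitUnix(t time.Time) (secs int64, nanos int32) {
	secs = t.Unix()
	nanos = int32(t.Sub(time.Unix(secs, 0)))
	return secs, nanos
}

func main() {
	t := time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC)
	secs, nanos := splitUnix(t)
	fmt.Println(secs, nanos) // nanos == 10000000, i.e. t.Nanosecond()
}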
// source: google/protobuf/timestamp.proto -package timestamp +package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -18,7 +16,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // A Timestamp represents a point in time independent of any time zone // or calendar, represented as seconds and fractions of seconds at @@ -83,9 +81,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). +// is required, though only UTC (as indicated by "Z") is presently supported. // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. @@ -96,8 +92,8 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. 
// // type Timestamp struct { @@ -119,19 +115,17 @@ func (m *Timestamp) Reset() { *m = Timestamp{} } func (m *Timestamp) String() string { return proto.CompactTextString(m) } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_292007bbfe81227e, []int{0} + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} } - func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } - func (m *Timestamp) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Timestamp.Unmarshal(m, b) } func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) } -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) } func (m *Timestamp) XXX_Size() int { return xxx_messageInfo_Timestamp.Size(m) @@ -160,9 +154,11 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) +} -var fileDescriptor_292007bbfe81227e = []byte{ +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto index eafb3fa03a..06750ab1f1 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -103,9 +103,7 @@ option objc_class_prefix = "GPB"; // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). +// is required, though only UTC (as indicated by "Z") is presently supported. // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. @@ -116,8 +114,8 @@ option objc_class_prefix = "GPB"; // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. 
// // message Timestamp { diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go index add19a1adb..0f0fa837f6 100644 --- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -1,13 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/wrappers.proto -package wrappers +package wrappers // import "github.com/golang/protobuf/ptypes/wrappers" -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -18,7 +16,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // Wrapper message for `double`. // @@ -35,19 +33,17 @@ func (m *DoubleValue) Reset() { *m = DoubleValue{} } func (m *DoubleValue) String() string { return proto.CompactTextString(m) } func (*DoubleValue) ProtoMessage() {} func (*DoubleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{0} + return fileDescriptor_wrappers_16c7c35c009f3253, []int{0} } - func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } - func (m *DoubleValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DoubleValue.Unmarshal(m, b) } func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) } -func (m *DoubleValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleValue.Merge(m, src) +func (dst *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(dst, src) } func (m *DoubleValue) XXX_Size() int { return xxx_messageInfo_DoubleValue.Size(m) @@ -80,19 +76,17 @@ func (m *FloatValue) Reset() { *m = FloatValue{} } func (m *FloatValue) String() string { return proto.CompactTextString(m) } func (*FloatValue) ProtoMessage() {} func (*FloatValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{1} + return fileDescriptor_wrappers_16c7c35c009f3253, []int{1} } - func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } - func (m *FloatValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FloatValue.Unmarshal(m, b) } func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) } -func (m *FloatValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_FloatValue.Merge(m, src) +func (dst *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(dst, src) } func (m *FloatValue) XXX_Size() int { return xxx_messageInfo_FloatValue.Size(m) @@ -125,19 +119,17 @@ func (m *Int64Value) Reset() { *m = Int64Value{} } func (m *Int64Value) String() string { return proto.CompactTextString(m) } func (*Int64Value) ProtoMessage() {} func (*Int64Value) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{2} + return fileDescriptor_wrappers_16c7c35c009f3253, []int{2} } - func (*Int64Value) XXX_WellKnownType() string 
{ return "Int64Value" } - func (m *Int64Value) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Int64Value.Unmarshal(m, b) } func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) } -func (m *Int64Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Int64Value.Merge(m, src) +func (dst *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(dst, src) } func (m *Int64Value) XXX_Size() int { return xxx_messageInfo_Int64Value.Size(m) @@ -170,19 +162,17 @@ func (m *UInt64Value) Reset() { *m = UInt64Value{} } func (m *UInt64Value) String() string { return proto.CompactTextString(m) } func (*UInt64Value) ProtoMessage() {} func (*UInt64Value) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{3} + return fileDescriptor_wrappers_16c7c35c009f3253, []int{3} } - func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } - func (m *UInt64Value) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UInt64Value.Unmarshal(m, b) } func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) } -func (m *UInt64Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_UInt64Value.Merge(m, src) +func (dst *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(dst, src) } func (m *UInt64Value) XXX_Size() int { return xxx_messageInfo_UInt64Value.Size(m) @@ -215,19 +205,17 @@ func (m *Int32Value) Reset() { *m = Int32Value{} } func (m *Int32Value) String() string { return proto.CompactTextString(m) } func (*Int32Value) ProtoMessage() {} func (*Int32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{4} + return fileDescriptor_wrappers_16c7c35c009f3253, []int{4} } - func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } - func (m *Int32Value) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Int32Value.Unmarshal(m, b) } func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) } -func (m *Int32Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Int32Value.Merge(m, src) +func (dst *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(dst, src) } func (m *Int32Value) XXX_Size() int { return xxx_messageInfo_Int32Value.Size(m) @@ -260,19 +248,17 @@ func (m *UInt32Value) Reset() { *m = UInt32Value{} } func (m *UInt32Value) String() string { return proto.CompactTextString(m) } func (*UInt32Value) ProtoMessage() {} func (*UInt32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{5} + return fileDescriptor_wrappers_16c7c35c009f3253, []int{5} } - func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } - func (m *UInt32Value) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UInt32Value.Unmarshal(m, b) } func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) } -func (m *UInt32Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_UInt32Value.Merge(m, src) +func (dst *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(dst, src) } func (m *UInt32Value) XXX_Size() int { return xxx_messageInfo_UInt32Value.Size(m) @@ -305,19 +291,17 @@ func (m *BoolValue) Reset() { *m = BoolValue{} } func (m *BoolValue) String() string { 
return proto.CompactTextString(m) } func (*BoolValue) ProtoMessage() {} func (*BoolValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{6} + return fileDescriptor_wrappers_16c7c35c009f3253, []int{6} } - func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } - func (m *BoolValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BoolValue.Unmarshal(m, b) } func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) } -func (m *BoolValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_BoolValue.Merge(m, src) +func (dst *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(dst, src) } func (m *BoolValue) XXX_Size() int { return xxx_messageInfo_BoolValue.Size(m) @@ -350,19 +334,17 @@ func (m *StringValue) Reset() { *m = StringValue{} } func (m *StringValue) String() string { return proto.CompactTextString(m) } func (*StringValue) ProtoMessage() {} func (*StringValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{7} + return fileDescriptor_wrappers_16c7c35c009f3253, []int{7} } - func (*StringValue) XXX_WellKnownType() string { return "StringValue" } - func (m *StringValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StringValue.Unmarshal(m, b) } func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) } -func (m *StringValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringValue.Merge(m, src) +func (dst *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(dst, src) } func (m *StringValue) XXX_Size() int { return xxx_messageInfo_StringValue.Size(m) @@ -395,19 +377,17 @@ func (m *BytesValue) Reset() { *m = BytesValue{} } func (m *BytesValue) String() string { return proto.CompactTextString(m) } func (*BytesValue) ProtoMessage() {} func (*BytesValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{8} + return fileDescriptor_wrappers_16c7c35c009f3253, []int{8} } - func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } - func (m *BytesValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BytesValue.Unmarshal(m, b) } func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) } -func (m *BytesValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesValue.Merge(m, src) +func (dst *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(dst, src) } func (m *BytesValue) XXX_Size() int { return xxx_messageInfo_BytesValue.Size(m) @@ -437,9 +417,11 @@ func init() { proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") } -func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } +func init() { + proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253) +} -var fileDescriptor_5377b62bda767935 = []byte{ +var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{ // 259 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go 
index 4fd44c45e2..5351f36f36 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -7105,15 +7105,15 @@ func (m *Any) ToRawInfo() interface{} { // ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. func (m *ApiKeySecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } - // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - // always include this required field. - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - // always include this required field. - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7129,11 +7129,9 @@ func (m *ApiKeySecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7149,21 +7147,21 @@ func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. func (m *BodyParameter) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - // always include this required field. - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } if m.Required != false { info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) + if m.Schema != nil { + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) + } // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { @@ -7177,9 +7175,6 @@ func (m *BodyParameter) ToRawInfo() interface{} { // ToRawInfo returns a description of Contact suitable for JSON or YAML export. func (m *Contact) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7201,9 +7196,6 @@ func (m *Contact) ToRawInfo() interface{} { // ToRawInfo returns a description of Default suitable for JSON or YAML export. 
func (m *Default) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -7216,9 +7208,6 @@ func (m *Default) ToRawInfo() interface{} { // ToRawInfo returns a description of Definitions suitable for JSON or YAML export. func (m *Definitions) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -7231,13 +7220,12 @@ func (m *Definitions) ToRawInfo() interface{} { // ToRawInfo returns a description of Document suitable for JSON or YAML export. func (m *Document) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Swagger != "" { + info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) + } + if m.Info != nil { + info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) - // always include this required field. - info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Host != "" { info = append(info, yaml.MapItem{Key: "host", Value: m.Host}) @@ -7254,8 +7242,9 @@ func (m *Document) ToRawInfo() interface{} { if len(m.Produces) != 0 { info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) + if m.Paths != nil { + info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) + } // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Definitions != nil { info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()}) @@ -7305,9 +7294,6 @@ func (m *Document) ToRawInfo() interface{} { // ToRawInfo returns a description of Examples suitable for JSON or YAML export. func (m *Examples) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -7320,14 +7306,12 @@ func (m *Examples) ToRawInfo() interface{} { // ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. func (m *ExternalDocs) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + if m.Url != "" { + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + } if m.VendorExtension != nil { for _, item := range m.VendorExtension { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -7340,9 +7324,6 @@ func (m *ExternalDocs) ToRawInfo() interface{} { // ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. 
func (m *FileSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Format != "" { info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } @@ -7359,8 +7340,9 @@ func (m *FileSchema) ToRawInfo() interface{} { if len(m.Required) != 0 { info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } if m.ReadOnly != false { info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) } @@ -7384,9 +7366,6 @@ func (m *FileSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Required != false { info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } @@ -7472,11 +7451,9 @@ func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of Header suitable for JSON or YAML export. func (m *Header) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) if m.Format != "" { info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } @@ -7547,9 +7524,6 @@ func (m *Header) ToRawInfo() interface{} { // ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Required != false { info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } @@ -7632,9 +7606,6 @@ func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of Headers suitable for JSON or YAML export. func (m *Headers) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -7647,13 +7618,12 @@ func (m *Headers) ToRawInfo() interface{} { // ToRawInfo returns a description of Info suitable for JSON or YAML export. func (m *Info) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Title != "" { + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + } + if m.Version != "" { + info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) - // always include this required field. - info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7680,9 +7650,6 @@ func (m *Info) ToRawInfo() interface{} { // ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. 
func (m *ItemsItem) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if len(m.Schema) != 0 { items := make([]interface{}, 0) for _, item := range m.Schema { @@ -7697,11 +7664,9 @@ func (m *ItemsItem) ToRawInfo() interface{} { // ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. func (m *JsonReference) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.XRef != "" { + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7711,11 +7676,9 @@ func (m *JsonReference) ToRawInfo() interface{} { // ToRawInfo returns a description of License suitable for JSON or YAML export. func (m *License) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) if m.Url != "" { info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) } @@ -7731,9 +7694,6 @@ func (m *License) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. func (m *NamedAny) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7744,9 +7704,6 @@ func (m *NamedAny) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. func (m *NamedHeader) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7757,9 +7714,6 @@ func (m *NamedHeader) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. func (m *NamedParameter) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7770,9 +7724,6 @@ func (m *NamedParameter) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. func (m *NamedPathItem) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7783,9 +7734,6 @@ func (m *NamedPathItem) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. func (m *NamedResponse) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7796,9 +7744,6 @@ func (m *NamedResponse) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. func (m *NamedResponseValue) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7809,9 +7754,6 @@ func (m *NamedResponseValue) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. 
func (m *NamedSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7822,9 +7764,6 @@ func (m *NamedSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7835,9 +7774,6 @@ func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedString suitable for JSON or YAML export. func (m *NamedString) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7850,9 +7786,6 @@ func (m *NamedString) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. func (m *NamedStringArray) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7890,21 +7823,22 @@ func (m *NonBodyParameter) ToRawInfo() interface{} { // ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - // always include this required field. - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) if m.Scopes != nil { info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - // always include this required field. - info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) - // always include this required field. - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.AuthorizationUrl != "" { + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + } + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + } if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7920,19 +7854,19 @@ func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - // always include this required field. 
- info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) if m.Scopes != nil { info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - // always include this required field. - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + } if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7948,19 +7882,19 @@ func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - // always include this required field. - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) if m.Scopes != nil { info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - // always include this required field. - info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + if m.AuthorizationUrl != "" { + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + } if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7976,19 +7910,19 @@ func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - // always include this required field. - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) if m.Scopes != nil { info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - // always include this required field. - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + } if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -8004,9 +7938,6 @@ func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. 
func (m *Oauth2Scopes) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} return info } @@ -8014,9 +7945,6 @@ func (m *Oauth2Scopes) ToRawInfo() interface{} { // ToRawInfo returns a description of Operation suitable for JSON or YAML export. func (m *Operation) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if len(m.Tags) != 0 { info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags}) } @@ -8047,8 +7975,9 @@ func (m *Operation) ToRawInfo() interface{} { info = append(info, yaml.MapItem{Key: "parameters", Value: items}) } // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} - // always include this required field. - info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) + if m.Responses != nil { + info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) + } // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Schemes) != 0 { info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) @@ -8093,9 +8022,6 @@ func (m *Parameter) ToRawInfo() interface{} { // ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. func (m *ParameterDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8125,9 +8051,6 @@ func (m *ParametersItem) ToRawInfo() interface{} { // ToRawInfo returns a description of PathItem suitable for JSON or YAML export. func (m *PathItem) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.XRef != "" { info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } @@ -8179,11 +8102,9 @@ func (m *PathItem) ToRawInfo() interface{} { // ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. func (m *PathParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) if m.In != "" { info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } @@ -8263,9 +8184,6 @@ func (m *PathParameterSubSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of Paths suitable for JSON or YAML export. func (m *Paths) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.VendorExtension != nil { for _, item := range m.VendorExtension { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8284,9 +8202,6 @@ func (m *Paths) ToRawInfo() interface{} { // ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. 
func (m *PrimitivesItems) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Type != "" { info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } @@ -8357,9 +8272,6 @@ func (m *PrimitivesItems) ToRawInfo() interface{} { // ToRawInfo returns a description of Properties suitable for JSON or YAML export. func (m *Properties) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8372,9 +8284,6 @@ func (m *Properties) ToRawInfo() interface{} { // ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. func (m *QueryParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Required != false { info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } @@ -8460,11 +8369,9 @@ func (m *QueryParameterSubSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of Response suitable for JSON or YAML export. func (m *Response) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) if m.Schema != nil { info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) } @@ -8489,9 +8396,6 @@ func (m *Response) ToRawInfo() interface{} { // ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. func (m *ResponseDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8521,9 +8425,6 @@ func (m *ResponseValue) ToRawInfo() interface{} { // ToRawInfo returns a description of Responses suitable for JSON or YAML export. func (m *Responses) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.ResponseCode != nil { for _, item := range m.ResponseCode { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8542,9 +8443,6 @@ func (m *Responses) ToRawInfo() interface{} { // ToRawInfo returns a description of Schema suitable for JSON or YAML export. func (m *Schema) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.XRef != "" { info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } @@ -8690,9 +8588,6 @@ func (m *SchemaItem) ToRawInfo() interface{} { // ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. func (m *SecurityDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8742,9 +8637,6 @@ func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { // ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. 
func (m *SecurityRequirement) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8762,11 +8654,9 @@ func (m *StringArray) ToRawInfo() interface{} { // ToRawInfo returns a description of Tag suitable for JSON or YAML export. func (m *Tag) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } - // always include this required field. - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -8786,9 +8676,6 @@ func (m *Tag) ToRawInfo() interface{} { // ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. func (m *TypeItem) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if len(m.Value) != 0 { info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) } @@ -8798,9 +8685,6 @@ func (m *TypeItem) ToRawInfo() interface{} { // ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. func (m *VendorExtension) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8813,9 +8697,6 @@ func (m *VendorExtension) ToRawInfo() interface{} { // ToRawInfo returns a description of Xml suitable for JSON or YAML export. func (m *Xml) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m == nil { - return info - } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go index af10e0d1bc..c954a2d9b2 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -17,14 +17,13 @@ package compiler import ( "errors" "fmt" + "gopkg.in/yaml.v2" "io/ioutil" "log" "net/http" "net/url" "path/filepath" "strings" - - yaml "gopkg.in/yaml.v2" ) var fileCache map[string][]byte @@ -32,8 +31,6 @@ var infoCache map[string]interface{} var count int64 var verboseReader = false -var fileCacheEnable = true -var infoCacheEnable = true func initializeFileCache() { if fileCache == nil { @@ -47,56 +44,29 @@ func initializeInfoCache() { } } -func DisableFileCache() { - fileCacheEnable = false -} - -func DisableInfoCache() { - infoCacheEnable = false -} - -func RemoveFromFileCache(fileurl string) { - if !fileCacheEnable { - return - } - initializeFileCache() - delete(fileCache, fileurl) -} - -func RemoveFromInfoCache(filename string) { - if !infoCacheEnable { - return - } - initializeInfoCache() - delete(infoCache, filename) -} - // FetchFile gets a specified file from the local filesystem or a remote location. 
func FetchFile(fileurl string) ([]byte, error) { - var bytes []byte initializeFileCache() - if fileCacheEnable { - bytes, ok := fileCache[fileurl] - if ok { - if verboseReader { - log.Printf("Cache hit %s", fileurl) - } - return bytes, nil - } + bytes, ok := fileCache[fileurl] + if ok { if verboseReader { - log.Printf("Fetching %s", fileurl) + log.Printf("Cache hit %s", fileurl) } + return bytes, nil + } + if verboseReader { + log.Printf("Fetching %s", fileurl) } response, err := http.Get(fileurl) if err != nil { return nil, err } - defer response.Body.Close() if response.StatusCode != 200 { return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) } + defer response.Body.Close() bytes, err = ioutil.ReadAll(response.Body) - if fileCacheEnable && err == nil { + if err == nil { fileCache[fileurl] = bytes } return bytes, err @@ -125,24 +95,22 @@ func ReadBytesForFile(filename string) ([]byte, error) { // ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { initializeInfoCache() - if infoCacheEnable { - cachedInfo, ok := infoCache[filename] - if ok { - if verboseReader { - log.Printf("Cache hit info for file %s", filename) - } - return cachedInfo, nil - } + cachedInfo, ok := infoCache[filename] + if ok { if verboseReader { - log.Printf("Reading info for file %s", filename) + log.Printf("Cache hit info for file %s", filename) } + return cachedInfo, nil + } + if verboseReader { + log.Printf("Reading info for file %s", filename) } var info yaml.MapSlice err := yaml.Unmarshal(bytes, &info) if err != nil { return nil, err } - if infoCacheEnable && len(filename) > 0 { + if len(filename) > 0 { infoCache[filename] = info } return info, nil @@ -151,7 +119,7 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { // ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. func ReadInfoForRef(basefile string, ref string) (interface{}, error) { initializeInfoCache() - if infoCacheEnable { + { info, ok := infoCache[ref] if ok { if verboseReader { @@ -159,9 +127,9 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) { } return info, nil } - if verboseReader { - log.Printf("Reading info for ref %s#%s", basefile, ref) - } + } + if verboseReader { + log.Printf("Reading info for ref %s#%s", basefile, ref) } count = count + 1 basedir, _ := filepath.Split(basefile) @@ -202,8 +170,6 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) { } } } - if infoCacheEnable { - infoCache[ref] = info - } + infoCache[ref] = info return info, nil } diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go index e6927896f9..749ff78416 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -1,12 +1,24 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: extension.proto +/* +Package openapiextension_v1 is a generated protocol buffer package. 
+ +It is generated from these files: + extension.proto + +It has these top-level messages: + Version + ExtensionHandlerRequest + ExtensionHandlerResponse + Wrapper +*/ package openapiextension_v1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import any "github.com/golang/protobuf/ptypes/any" +import google_protobuf "github.com/golang/protobuf/ptypes/any" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -21,40 +33,18 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // The version number of OpenAPI compiler. type Version struct { - Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` - Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` - Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` + Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should // be empty for mainline stable releases. - Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` } -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_d25f09c742c58c90, []int{0} -} -func (m *Version) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) -} -func (dst *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(dst, src) -} -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *Version) GetMajor() int32 { if m != nil { @@ -88,37 +78,15 @@ func (m *Version) GetSuffix() string { type ExtensionHandlerRequest struct { // The OpenAPI descriptions that were explicitly listed on the command line. // The specifications will appear in the order they are specified to gnostic. - Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` // The version number of openapi compiler. 
- CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } -func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerRequest) ProtoMessage() {} -func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_d25f09c742c58c90, []int{1} -} -func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b) -} -func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic) -} -func (dst *ExtensionHandlerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionHandlerRequest.Merge(dst, src) -} -func (m *ExtensionHandlerRequest) XXX_Size() int { - return xxx_messageInfo_ExtensionHandlerRequest.Size(m) -} -func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m) + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` } -var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo +func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } +func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerRequest) ProtoMessage() {} +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { if m != nil { @@ -137,7 +105,7 @@ func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { // The extensions writes an encoded ExtensionHandlerResponse to stdout. type ExtensionHandlerResponse struct { // true if the extension is handled by the extension handler; false otherwise - Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` + Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` // Error message. If non-empty, the extension handling failed. // The extension handler process should exit with status code zero // even if it reports an error in this way. @@ -147,37 +115,15 @@ type ExtensionHandlerResponse struct { // itself -- such as the input Document being unparseable -- should be // reported by writing a message to stderr and exiting with a non-zero // status code. 
- Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"` + Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` // text output - Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` } -func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } -func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerResponse) ProtoMessage() {} -func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_d25f09c742c58c90, []int{2} -} -func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b) -} -func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic) -} -func (dst *ExtensionHandlerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionHandlerResponse.Merge(dst, src) -} -func (m *ExtensionHandlerResponse) XXX_Size() int { - return xxx_messageInfo_ExtensionHandlerResponse.Size(m) -} -func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo +func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } +func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerResponse) ProtoMessage() {} +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *ExtensionHandlerResponse) GetHandled() bool { if m != nil { @@ -193,7 +139,7 @@ func (m *ExtensionHandlerResponse) GetError() []string { return nil } -func (m *ExtensionHandlerResponse) GetValue() *any.Any { +func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { if m != nil { return m.Value } @@ -202,39 +148,17 @@ func (m *ExtensionHandlerResponse) GetValue() *any.Any { type Wrapper struct { // version of the OpenAPI specification in which this extension was written. 
- Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` // Name of the extension - ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` // Must be a valid yaml for the proto - Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Wrapper) Reset() { *m = Wrapper{} } -func (m *Wrapper) String() string { return proto.CompactTextString(m) } -func (*Wrapper) ProtoMessage() {} -func (*Wrapper) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_d25f09c742c58c90, []int{3} -} -func (m *Wrapper) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Wrapper.Unmarshal(m, b) -} -func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic) -} -func (dst *Wrapper) XXX_Merge(src proto.Message) { - xxx_messageInfo_Wrapper.Merge(dst, src) -} -func (m *Wrapper) XXX_Size() int { - return xxx_messageInfo_Wrapper.Size(m) -} -func (m *Wrapper) XXX_DiscardUnknown() { - xxx_messageInfo_Wrapper.DiscardUnknown(m) + Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` } -var xxx_messageInfo_Wrapper proto.InternalMessageInfo +func (m *Wrapper) Reset() { *m = Wrapper{} } +func (m *Wrapper) String() string { return proto.CompactTextString(m) } +func (*Wrapper) ProtoMessage() {} +func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *Wrapper) GetVersion() string { if m != nil { @@ -264,9 +188,9 @@ func init() { proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") } -func init() { proto.RegisterFile("extension.proto", fileDescriptor_extension_d25f09c742c58c90) } +func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } -var fileDescriptor_extension_d25f09c742c58c90 = []byte{ +var fileDescriptor0 = []byte{ // 357 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40, 0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64, diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml index 597bc9996f..b5ffbe03d8 100644 --- a/vendor/github.com/gregjones/httpcache/.travis.yml +++ b/vendor/github.com/gregjones/httpcache/.travis.yml @@ -1,18 +1,19 @@ sudo: false language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - master matrix: allow_failures: - go: master fast_finish: true - include: - - go: 1.10.x - - go: 1.11.x - env: GOFMT=1 - - go: master install: - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). script: - go get -t -v ./... - - if test -n "${GOFMT}"; then gofmt -w -s . && git diff --exit-code; fi + - diff -u <(echo -n) <(gofmt -d .) - go tool vet . - go test -v -race ./... 
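The OpenAPIv2.go hunks earlier in this vendor revert change how required OpenAPI fields are exported: the removed lines always wrote required keys such as the license name or $ref and guarded against a nil receiver, while the restored lines write those keys only when non-empty and drop the nil guard. The following is a minimal, self-contained sketch of that difference, not part of the patch; it uses gopkg.in/yaml.v2 (which the vendored file already imports), and the license type and helper names are illustrative, not the vendored identifiers.

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// license mirrors just the two fields of the gnostic License message used here.
type license struct {
	Name string
	Url  string
}

// toRawInfoAlways follows the removed lines: the required "name" key is always emitted,
// and a nil receiver returns an empty MapSlice.
func toRawInfoAlways(m *license) yaml.MapSlice {
	info := yaml.MapSlice{}
	if m == nil {
		return info
	}
	info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
	if m.Url != "" {
		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
	}
	return info
}

// toRawInfoConditional follows the restored lines: "name" is emitted only when non-empty,
// and there is no nil guard, so a nil receiver would panic on the field access.
func toRawInfoConditional(m *license) yaml.MapSlice {
	info := yaml.MapSlice{}
	if m.Name != "" {
		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
	}
	if m.Url != "" {
		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
	}
	return info
}

func main() {
	empty := &license{}
	always, _ := yaml.Marshal(toRawInfoAlways(empty))
	conditional, _ := yaml.Marshal(toRawInfoConditional(empty))
	// The first form still carries a "name" key for an empty value; the second is an empty mapping.
	fmt.Printf("always:      %q\nconditional: %q\n", always, conditional)
}
```

In other words, with the restored vendored code an empty required field simply disappears from the exported YAML/JSON, and callers are expected never to invoke ToRawInfo on a nil message.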
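Similarly for compiler/reader.go above: with DisableFileCache, DisableInfoCache and the RemoveFrom* helpers gone, the restored FetchFile caches every successful download unconditionally in a package-level map for the life of the process, and it registers the Body Close defer only after the status-code check. Below is a rough, hedged sketch of the resulting caller-visible behavior, not part of the patch; fetchCached and its cache are illustrative stand-ins rather than the vendored API.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// fileCache mirrors the package-level map in the restored reader.go: once a URL has been
// fetched successfully its bytes stay cached for the life of the process, and there is
// no longer any switch to turn that behavior off.
var fileCache = map[string][]byte{}

// fetchCached is an illustrative stand-in for the restored FetchFile logic.
func fetchCached(fileurl string) ([]byte, error) {
	if b, ok := fileCache[fileurl]; ok {
		return b, nil // cache hit: no second network round trip for the same URL
	}
	resp, err := http.Get(fileurl)
	if err != nil {
		return nil, err
	}
	// Closed unconditionally here; the restored vendored code defers the Close only after
	// its status-code check, so its non-200 error path returns without closing the body.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("error downloading %s: %s", fileurl, resp.Status)
	}
	b, err := ioutil.ReadAll(resp.Body)
	if err == nil {
		fileCache[fileurl] = b
	}
	return b, err
}

func main() {
	// The second call for the same URL is served from memory, matching the reverted behavior.
	for i := 0; i < 2; i++ {
		if b, err := fetchCached("https://example.com/"); err == nil {
			fmt.Println(len(b), "bytes")
		}
	}
}
```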
diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md index 51e7d23d09..09c9e7c173 100644 --- a/vendor/github.com/gregjones/httpcache/README.md +++ b/vendor/github.com/gregjones/httpcache/README.md @@ -7,8 +7,6 @@ Package httpcache provides a http.RoundTripper implementation that works as a mo It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). -This project isn't actively maintained; it works for what I, and seemingly others, want to do with it, and I consider it "done". That said, if you find any issues, please open a Pull Request and I will try to review it. Any changes now that change the public API won't be considered. - Cache Backends -------------- @@ -21,8 +19,6 @@ Cache Backends - [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. - [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). -If you implement any other backend and wish it to be linked here, please send a PR editing this file. - License ------- diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go index b41a63d1ff..f6a2ec4a53 100644 --- a/vendor/github.com/gregjones/httpcache/httpcache.go +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -416,14 +416,14 @@ func canStaleOnError(respHeaders, reqHeaders http.Header) bool { func getEndToEndHeaders(respHeaders http.Header) []string { // These headers are always hop-by-hop hopByHopHeaders := map[string]struct{}{ - "Connection": {}, - "Keep-Alive": {}, - "Proxy-Authenticate": {}, - "Proxy-Authorization": {}, - "Te": {}, - "Trailers": {}, - "Transfer-Encoding": {}, - "Upgrade": {}, + "Connection": struct{}{}, + "Keep-Alive": struct{}{}, + "Proxy-Authenticate": struct{}{}, + "Proxy-Authorization": struct{}{}, + "Te": struct{}{}, + "Trailers": struct{}{}, + "Transfer-Encoding": struct{}{}, + "Upgrade": struct{}{}, } for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { @@ -433,7 +433,7 @@ func getEndToEndHeaders(respHeaders http.Header) []string { } } endToEndHeaders := []string{} - for respHeader := range respHeaders { + for respHeader, _ := range respHeaders { if _, ok := hopByHopHeaders[respHeader]; !ok { endToEndHeaders = append(endToEndHeaders, respHeader) } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt deleted file mode 100644 index 364516251b..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015, Gengo, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of Gengo, Inc. 
nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel deleted file mode 100644 index 76cafe6ec7..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -package(default_visibility = ["//visibility:public"]) - -proto_library( - name = "internal_proto", - srcs = ["stream_chunk.proto"], - deps = ["@com_google_protobuf//:any_proto"], -) - -go_proto_library( - name = "internal_go_proto", - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", - proto = ":internal_proto", -) - -go_library( - name = "go_default_library", - embed = [":internal_go_proto"], - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go deleted file mode 100644 index 8858f06904..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go +++ /dev/null @@ -1,118 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: internal/stream_chunk.proto - -package internal - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import any "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// StreamError is a response type which is returned when -// streaming rpc returns an error. 
-type StreamError struct { - GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` - HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` - HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` - Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StreamError) Reset() { *m = StreamError{} } -func (m *StreamError) String() string { return proto.CompactTextString(m) } -func (*StreamError) ProtoMessage() {} -func (*StreamError) Descriptor() ([]byte, []int) { - return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0} -} -func (m *StreamError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamError.Unmarshal(m, b) -} -func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) -} -func (dst *StreamError) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamError.Merge(dst, src) -} -func (m *StreamError) XXX_Size() int { - return xxx_messageInfo_StreamError.Size(m) -} -func (m *StreamError) XXX_DiscardUnknown() { - xxx_messageInfo_StreamError.DiscardUnknown(m) -} - -var xxx_messageInfo_StreamError proto.InternalMessageInfo - -func (m *StreamError) GetGrpcCode() int32 { - if m != nil { - return m.GrpcCode - } - return 0 -} - -func (m *StreamError) GetHttpCode() int32 { - if m != nil { - return m.HttpCode - } - return 0 -} - -func (m *StreamError) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *StreamError) GetHttpStatus() string { - if m != nil { - return m.HttpStatus - } - return "" -} - -func (m *StreamError) GetDetails() []*any.Any { - if m != nil { - return m.Details - } - return nil -} - -func init() { - proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") -} - -func init() { - proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7) -} - -var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{ - // 223 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30, - 0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23, - 0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78, - 0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce, - 0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2, - 0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a, - 0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f, - 0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54, - 0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7, - 0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5, - 0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9, - 0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 
0x7a, 0x59, 0xac, - 0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18, - 0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto deleted file mode 100644 index 55f42ce63e..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; -package grpc.gateway.runtime; -option go_package = "internal"; - -import "google/protobuf/any.proto"; - -// StreamError is a response type which is returned when -// streaming rpc returns an error. -message StreamError { - int32 grpc_code = 1; - int32 http_code = 2; - string message = 3; - string http_status = 4; - repeated google.protobuf.Any details = 5; -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel deleted file mode 100644 index c99f83e585..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel +++ /dev/null @@ -1,80 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "convert.go", - "doc.go", - "errors.go", - "fieldmask.go", - "handler.go", - "marshal_json.go", - "marshal_jsonpb.go", - "marshal_proto.go", - "marshaler.go", - "marshaler_registry.go", - "mux.go", - "pattern.go", - "proto2_convert.go", - "proto_errors.go", - "query.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", - deps = [ - "//internal:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//grpclog:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "context_test.go", - "errors_test.go", - "fieldmask_test.go", - "handler_test.go", - "marshal_json_test.go", - "marshal_jsonpb_test.go", - "marshal_proto_test.go", - "marshaler_registry_test.go", - "mux_test.go", - "pattern_test.go", - "query_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//examples/proto/examplepb:go_default_library", - "//internal:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", - "@go_googleapis//google/rpc:errdetails_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:empty_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - 
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//:go_default_library", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go deleted file mode 100644 index 896057e1e1..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go +++ /dev/null @@ -1,210 +0,0 @@ -package runtime - -import ( - "context" - "encoding/base64" - "fmt" - "net" - "net/http" - "net/textproto" - "strconv" - "strings" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// MetadataHeaderPrefix is the http prefix that represents custom metadata -// parameters to or from a gRPC call. -const MetadataHeaderPrefix = "Grpc-Metadata-" - -// MetadataPrefix is prepended to permanent HTTP header keys (as specified -// by the IANA) when added to the gRPC context. -const MetadataPrefix = "grpcgateway-" - -// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to -// HTTP headers in a response handled by grpc-gateway -const MetadataTrailerPrefix = "Grpc-Trailer-" - -const metadataGrpcTimeout = "Grpc-Timeout" -const metadataHeaderBinarySuffix = "-Bin" - -const xForwardedFor = "X-Forwarded-For" -const xForwardedHost = "X-Forwarded-Host" - -var ( - // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound - // header isn't present. If the value is 0 the sent `context` will not have a timeout. - DefaultContextTimeout = 0 * time.Second -) - -func decodeBinHeader(v string) ([]byte, error) { - if len(v)%4 == 0 { - // Input was padded, or padding was not necessary. - return base64.StdEncoding.DecodeString(v) - } - return base64.RawStdEncoding.DecodeString(v) -} - -/* -AnnotateContext adds context information such as metadata from the request. - -At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", -except that the forwarded destination is not another HTTP service but rather -a gRPC service. -*/ -func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { - var pairs []string - timeout := DefaultContextTimeout - if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { - var err error - timeout, err = timeoutDecode(tm) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) - } - } - - for key, vals := range req.Header { - for _, val := range vals { - key = textproto.CanonicalMIMEHeaderKey(key) - // For backwards-compatibility, pass through 'authorization' header with no prefix. - if key == "Authorization" { - pairs = append(pairs, "authorization", val) - } - if h, ok := mux.incomingHeaderMatcher(key); ok { - // Handles "-bin" metadata in grpc, since grpc will do another base64 - // encode before sending to server, we need to decode it first. 
- if strings.HasSuffix(key, metadataHeaderBinarySuffix) { - b, err := decodeBinHeader(val) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) - } - - val = string(b) - } - pairs = append(pairs, h, val) - } - } - } - if host := req.Header.Get(xForwardedHost); host != "" { - pairs = append(pairs, strings.ToLower(xForwardedHost), host) - } else if req.Host != "" { - pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) - } - - if addr := req.RemoteAddr; addr != "" { - if remoteIP, _, err := net.SplitHostPort(addr); err == nil { - if fwd := req.Header.Get(xForwardedFor); fwd == "" { - pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) - } else { - pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) - } - } else { - grpclog.Infof("invalid remote addr: %s", addr) - } - } - - if timeout != 0 { - ctx, _ = context.WithTimeout(ctx, timeout) - } - if len(pairs) == 0 { - return ctx, nil - } - md := metadata.Pairs(pairs...) - for _, mda := range mux.metadataAnnotators { - md = metadata.Join(md, mda(ctx, req)) - } - return metadata.NewOutgoingContext(ctx, md), nil -} - -// ServerMetadata consists of metadata sent from gRPC server. -type ServerMetadata struct { - HeaderMD metadata.MD - TrailerMD metadata.MD -} - -type serverMetadataKey struct{} - -// NewServerMetadataContext creates a new context with ServerMetadata -func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { - return context.WithValue(ctx, serverMetadataKey{}, md) -} - -// ServerMetadataFromContext returns the ServerMetadata in ctx -func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { - md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) - return -} - -func timeoutDecode(s string) (time.Duration, error) { - size := len(s) - if size < 2 { - return 0, fmt.Errorf("timeout string is too short: %q", s) - } - d, ok := timeoutUnitToDuration(s[size-1]) - if !ok { - return 0, fmt.Errorf("timeout unit is not recognized: %q", s) - } - t, err := strconv.ParseInt(s[:size-1], 10, 64) - if err != nil { - return 0, err - } - return d * time.Duration(t), nil -} - -func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { - switch u { - case 'H': - return time.Hour, true - case 'M': - return time.Minute, true - case 'S': - return time.Second, true - case 'm': - return time.Millisecond, true - case 'u': - return time.Microsecond, true - case 'n': - return time.Nanosecond, true - default: - } - return -} - -// isPermanentHTTPHeader checks whether hdr belongs to the list of -// permenant request headers maintained by IANA. 
-// http://www.iana.org/assignments/message-headers/message-headers.xml -func isPermanentHTTPHeader(hdr string) bool { - switch hdr { - case - "Accept", - "Accept-Charset", - "Accept-Language", - "Accept-Ranges", - "Authorization", - "Cache-Control", - "Content-Type", - "Cookie", - "Date", - "Expect", - "From", - "Host", - "If-Match", - "If-Modified-Since", - "If-None-Match", - "If-Schedule-Tag-Match", - "If-Unmodified-Since", - "Max-Forwards", - "Origin", - "Pragma", - "Referer", - "User-Agent", - "Via", - "Warning": - return true - } - return false -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go deleted file mode 100644 index a5b3bd6a79..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go +++ /dev/null @@ -1,312 +0,0 @@ -package runtime - -import ( - "encoding/base64" - "fmt" - "strconv" - "strings" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/ptypes/duration" - "github.com/golang/protobuf/ptypes/timestamp" - "github.com/golang/protobuf/ptypes/wrappers" -) - -// String just returns the given string. -// It is just for compatibility to other types. -func String(val string) (string, error) { - return val, nil -} - -// StringSlice converts 'val' where individual strings are separated by -// 'sep' into a string slice. -func StringSlice(val, sep string) ([]string, error) { - return strings.Split(val, sep), nil -} - -// Bool converts the given string representation of a boolean value into bool. -func Bool(val string) (bool, error) { - return strconv.ParseBool(val) -} - -// BoolSlice converts 'val' where individual booleans are separated by -// 'sep' into a bool slice. -func BoolSlice(val, sep string) ([]bool, error) { - s := strings.Split(val, sep) - values := make([]bool, len(s)) - for i, v := range s { - value, err := Bool(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Float64 converts the given string representation into representation of a floating point number into float64. -func Float64(val string) (float64, error) { - return strconv.ParseFloat(val, 64) -} - -// Float64Slice converts 'val' where individual floating point numbers are separated by -// 'sep' into a float64 slice. -func Float64Slice(val, sep string) ([]float64, error) { - s := strings.Split(val, sep) - values := make([]float64, len(s)) - for i, v := range s { - value, err := Float64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Float32 converts the given string representation of a floating point number into float32. -func Float32(val string) (float32, error) { - f, err := strconv.ParseFloat(val, 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -// Float32Slice converts 'val' where individual floating point numbers are separated by -// 'sep' into a float32 slice. -func Float32Slice(val, sep string) ([]float32, error) { - s := strings.Split(val, sep) - values := make([]float32, len(s)) - for i, v := range s { - value, err := Float32(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Int64 converts the given string representation of an integer into int64. -func Int64(val string) (int64, error) { - return strconv.ParseInt(val, 0, 64) -} - -// Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. 
-func Int64Slice(val, sep string) ([]int64, error) { - s := strings.Split(val, sep) - values := make([]int64, len(s)) - for i, v := range s { - value, err := Int64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Int32 converts the given string representation of an integer into int32. -func Int32(val string) (int32, error) { - i, err := strconv.ParseInt(val, 0, 32) - if err != nil { - return 0, err - } - return int32(i), nil -} - -// Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. -func Int32Slice(val, sep string) ([]int32, error) { - s := strings.Split(val, sep) - values := make([]int32, len(s)) - for i, v := range s { - value, err := Int32(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Uint64 converts the given string representation of an integer into uint64. -func Uint64(val string) (uint64, error) { - return strconv.ParseUint(val, 0, 64) -} - -// Uint64Slice converts 'val' where individual integers are separated by -// 'sep' into a uint64 slice. -func Uint64Slice(val, sep string) ([]uint64, error) { - s := strings.Split(val, sep) - values := make([]uint64, len(s)) - for i, v := range s { - value, err := Uint64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Uint32 converts the given string representation of an integer into uint32. -func Uint32(val string) (uint32, error) { - i, err := strconv.ParseUint(val, 0, 32) - if err != nil { - return 0, err - } - return uint32(i), nil -} - -// Uint32Slice converts 'val' where individual integers are separated by -// 'sep' into a uint32 slice. -func Uint32Slice(val, sep string) ([]uint32, error) { - s := strings.Split(val, sep) - values := make([]uint32, len(s)) - for i, v := range s { - value, err := Uint32(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Bytes converts the given string representation of a byte sequence into a slice of bytes -// A bytes sequence is encoded in URL-safe base64 without padding -func Bytes(val string) ([]byte, error) { - b, err := base64.StdEncoding.DecodeString(val) - if err != nil { - b, err = base64.URLEncoding.DecodeString(val) - if err != nil { - return nil, err - } - } - return b, nil -} - -// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. -func BytesSlice(val, sep string) ([][]byte, error) { - s := strings.Split(val, sep) - values := make([][]byte, len(s)) - for i, v := range s { - value, err := Bytes(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp. -func Timestamp(val string) (*timestamp.Timestamp, error) { - var r *timestamp.Timestamp - err := jsonpb.UnmarshalString(val, r) - return r, err -} - -// Duration converts the given string into a timestamp.Duration. -func Duration(val string) (*duration.Duration, error) { - var r *duration.Duration - err := jsonpb.UnmarshalString(val, r) - return r, err -} - -// Enum converts the given string into an int32 that should be type casted into the -// correct enum proto type. 
-func Enum(val string, enumValMap map[string]int32) (int32, error) { - e, ok := enumValMap[val] - if ok { - return e, nil - } - - i, err := Int32(val) - if err != nil { - return 0, fmt.Errorf("%s is not valid", val) - } - for _, v := range enumValMap { - if v == i { - return i, nil - } - } - return 0, fmt.Errorf("%s is not valid", val) -} - -// EnumSlice converts 'val' where individual enums are separated by 'sep' -// into a int32 slice. Each individual int32 should be type casted into the -// correct enum proto type. -func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { - s := strings.Split(val, sep) - values := make([]int32, len(s)) - for i, v := range s { - value, err := Enum(v, enumValMap) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -/* - Support fot google.protobuf.wrappers on top of primitive types -*/ - -// StringValue well-known type support as wrapper around string type -func StringValue(val string) (*wrappers.StringValue, error) { - return &wrappers.StringValue{Value: val}, nil -} - -// FloatValue well-known type support as wrapper around float32 type -func FloatValue(val string) (*wrappers.FloatValue, error) { - parsedVal, err := Float32(val) - return &wrappers.FloatValue{Value: parsedVal}, err -} - -// DoubleValue well-known type support as wrapper around float64 type -func DoubleValue(val string) (*wrappers.DoubleValue, error) { - parsedVal, err := Float64(val) - return &wrappers.DoubleValue{Value: parsedVal}, err -} - -// BoolValue well-known type support as wrapper around bool type -func BoolValue(val string) (*wrappers.BoolValue, error) { - parsedVal, err := Bool(val) - return &wrappers.BoolValue{Value: parsedVal}, err -} - -// Int32Value well-known type support as wrapper around int32 type -func Int32Value(val string) (*wrappers.Int32Value, error) { - parsedVal, err := Int32(val) - return &wrappers.Int32Value{Value: parsedVal}, err -} - -// UInt32Value well-known type support as wrapper around uint32 type -func UInt32Value(val string) (*wrappers.UInt32Value, error) { - parsedVal, err := Uint32(val) - return &wrappers.UInt32Value{Value: parsedVal}, err -} - -// Int64Value well-known type support as wrapper around int64 type -func Int64Value(val string) (*wrappers.Int64Value, error) { - parsedVal, err := Int64(val) - return &wrappers.Int64Value{Value: parsedVal}, err -} - -// UInt64Value well-known type support as wrapper around uint64 type -func UInt64Value(val string) (*wrappers.UInt64Value, error) { - parsedVal, err := Uint64(val) - return &wrappers.UInt64Value{Value: parsedVal}, err -} - -// BytesValue well-known type support as wrapper around bytes[] type -func BytesValue(val string) (*wrappers.BytesValue, error) { - parsedVal, err := Bytes(val) - return &wrappers.BytesValue{Value: parsedVal}, err -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go deleted file mode 100644 index b6e5ddf7a9..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -/* -Package runtime contains runtime helper functions used by -servers which protoc-gen-grpc-gateway generates. 
-*/ -package runtime diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go deleted file mode 100644 index 41d54ef916..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go +++ /dev/null @@ -1,145 +0,0 @@ -package runtime - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. -// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto -func HTTPStatusFromCode(code codes.Code) int { - switch code { - case codes.OK: - return http.StatusOK - case codes.Canceled: - return http.StatusRequestTimeout - case codes.Unknown: - return http.StatusInternalServerError - case codes.InvalidArgument: - return http.StatusBadRequest - case codes.DeadlineExceeded: - return http.StatusGatewayTimeout - case codes.NotFound: - return http.StatusNotFound - case codes.AlreadyExists: - return http.StatusConflict - case codes.PermissionDenied: - return http.StatusForbidden - case codes.Unauthenticated: - return http.StatusUnauthorized - case codes.ResourceExhausted: - return http.StatusTooManyRequests - case codes.FailedPrecondition: - return http.StatusPreconditionFailed - case codes.Aborted: - return http.StatusConflict - case codes.OutOfRange: - return http.StatusBadRequest - case codes.Unimplemented: - return http.StatusNotImplemented - case codes.Internal: - return http.StatusInternalServerError - case codes.Unavailable: - return http.StatusServiceUnavailable - case codes.DataLoss: - return http.StatusInternalServerError - } - - grpclog.Infof("Unknown gRPC error code: %v", code) - return http.StatusInternalServerError -} - -var ( - // HTTPError replies to the request with the error. - // You can set a custom function to this variable to customize error format. - HTTPError = DefaultHTTPError - // OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest - OtherErrorHandler = DefaultOtherErrorHandler -) - -type errorBody struct { - Error string `protobuf:"bytes,1,name=error" json:"error"` - // This is to make the error more compatible with users that expect errors to be Status objects: - // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto - // It should be the exact same message as the Error field. - Message string `protobuf:"bytes,1,name=message" json:"message"` - Code int32 `protobuf:"varint,2,name=code" json:"code"` - Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` -} - -// Make this also conform to proto.Message for builtin JSONPb Marshaler -func (e *errorBody) Reset() { *e = errorBody{} } -func (e *errorBody) String() string { return proto.CompactTextString(e) } -func (*errorBody) ProtoMessage() {} - -// DefaultHTTPError is the default implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a JSON object, -// which contains a member whose key is "error" and whose value is err.Error(). 
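A hedged sketch of the extension point errors.go exposes: HTTPError is a package-level hook, so a gateway can install its own formatter while reusing HTTPStatusFromCode for the code-to-status mapping above. The handler body below is illustrative, not the library default.

package main

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc/status"
)

func main() {
	// Replace the default error formatter before the mux starts serving.
	runtime.HTTPError = func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler,
		w http.ResponseWriter, r *http.Request, err error) {
		s, _ := status.FromError(err) // falls back to codes.Unknown for non-gRPC errors
		w.Header().Set("Content-Type", m.ContentType())
		w.WriteHeader(runtime.HTTPStatusFromCode(s.Code()))
		_, _ = w.Write([]byte(s.Message()))
	}
	_ = runtime.NewServeMux()
}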
-func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { - const fallback = `{"error": "failed to marshal error message"}` - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - w.Header().Del("Trailer") - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatability - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { - pb := s.Proto() - contentType = httpBodyMarshaler.ContentTypeFromMessage(pb) - } - w.Header().Set("Content-Type", contentType) - - body := &errorBody{ - Error: s.Message(), - Message: s.Message(), - Code: int32(s.Code()), - Details: s.Proto().GetDetails(), - } - - buf, merr := marshaler.Marshal(body) - if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", body, merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} - -// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler. -// It simply writes a string representation of the given error into "w". -func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { - http.Error(w, msg, code) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go deleted file mode 100644 index e1cf7a9146..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go +++ /dev/null @@ -1,70 +0,0 @@ -package runtime - -import ( - "encoding/json" - "io" - "strings" - - "github.com/golang/protobuf/protoc-gen-go/generator" - "google.golang.org/genproto/protobuf/field_mask" -) - -// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. 
-func FieldMaskFromRequestBody(r io.Reader) (*field_mask.FieldMask, error) { - fm := &field_mask.FieldMask{} - var root interface{} - if err := json.NewDecoder(r).Decode(&root); err != nil { - if err == io.EOF { - return fm, nil - } - return nil, err - } - - queue := []fieldMaskPathItem{{node: root}} - for len(queue) > 0 { - // dequeue an item - item := queue[0] - queue = queue[1:] - - if m, ok := item.node.(map[string]interface{}); ok { - // if the item is an object, then enqueue all of its children - for k, v := range m { - queue = append(queue, fieldMaskPathItem{path: append(item.path, generator.CamelCase(k)), node: v}) - } - } else if len(item.path) > 0 { - // otherwise, it's a leaf node so print its path - fm.Paths = append(fm.Paths, strings.Join(item.path, ".")) - } - } - - return fm, nil -} - -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask -type fieldMaskPathItem struct { - // the list of prior fields leading up to node - path []string - - // a generic decoded json object the current item to inspect for further path extraction - node interface{} -} - -// CamelCaseFieldMask updates the given FieldMask by converting all of its paths to CamelCase, using the same heuristic -// that's used for naming protobuf fields in Go. -func CamelCaseFieldMask(mask *field_mask.FieldMask) { - if mask == nil || mask.Paths == nil { - return - } - - var newPaths []string - for _, path := range mask.Paths { - lowerCasedParts := strings.Split(path, ".") - var camelCasedParts []string - for _, part := range lowerCasedParts { - camelCasedParts = append(camelCasedParts, generator.CamelCase(part)) - } - newPaths = append(newPaths, strings.Join(camelCasedParts, ".")) - } - - mask.Paths = newPaths -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go deleted file mode 100644 index 1fc63f7f58..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go +++ /dev/null @@ -1,215 +0,0 @@ -package runtime - -import ( - "fmt" - "io" - "net/http" - "net/textproto" - - "context" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - "github.com/grpc-ecosystem/grpc-gateway/internal" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// ForwardResponseStream forwards the stream from gRPC server to REST client. 
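A small sketch of the FieldMaskFromRequestBody entry point above, with a made-up JSON payload; nested keys come back as CamelCased, dot-joined paths.

package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Derive an update mask from a PATCH-style JSON body.
	body := strings.NewReader(`{"name": "demo", "spec": {"replicas": 3}}`)
	fm, err := runtime.FieldMaskFromRequestBody(body)
	if err != nil {
		panic(err)
	}
	fmt.Println(fm.Paths) // e.g. [Name Spec.Replicas]; order is not guaranteed
}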
-func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - f, ok := w.(http.Flusher) - if !ok { - grpclog.Infof("Flush not supported in %T", w) - http.Error(w, "unexpected type of web server", http.StatusInternalServerError) - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - http.Error(w, "unexpected error", http.StatusInternalServerError) - return - } - handleForwardResponseServerMetadata(w, mux, md) - - w.Header().Set("Transfer-Encoding", "chunked") - w.Header().Set("Content-Type", marshaler.ContentType()) - if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { - HTTPError(ctx, mux, marshaler, w, req, err) - return - } - - var delimiter []byte - if d, ok := marshaler.(Delimited); ok { - delimiter = d.Delimiter() - } else { - delimiter = []byte("\n") - } - - var wroteHeader bool - for { - resp, err := recv() - if err == io.EOF { - return - } - if err != nil { - handleForwardResponseStreamError(wroteHeader, marshaler, w, err) - return - } - if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - handleForwardResponseStreamError(wroteHeader, marshaler, w, err) - return - } - - buf, err := marshaler.Marshal(streamChunk(resp, nil)) - if err != nil { - grpclog.Infof("Failed to marshal response chunk: %v", err) - handleForwardResponseStreamError(wroteHeader, marshaler, w, err) - return - } - if _, err = w.Write(buf); err != nil { - grpclog.Infof("Failed to send response chunk: %v", err) - return - } - wroteHeader = true - if _, err = w.Write(delimiter); err != nil { - grpclog.Infof("Failed to send delimiter chunk: %v", err) - return - } - f.Flush() - } -} - -func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { - for k, vs := range md.HeaderMD { - if h, ok := mux.outgoingHeaderMatcher(k); ok { - for _, v := range vs { - w.Header().Add(h, v) - } - } - } -} - -func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) { - for k := range md.TrailerMD { - tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)) - w.Header().Add("Trailer", tKey) - } -} - -func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { - for k, vs := range md.TrailerMD { - tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) - for _, v := range vs { - w.Header().Add(tKey, v) - } - } -} - -// responseBody interface contains method for getting field for marshaling to the response body -// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` -type responseBody interface { - XXX_ResponseBody() interface{} -} - -// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. 
-func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatability - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { - contentType = httpBodyMarshaler.ContentTypeFromMessage(resp) - } - w.Header().Set("Content-Type", contentType) - - if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - HTTPError(ctx, mux, marshaler, w, req, err) - return - } - var buf []byte - var err error - if rb, ok := resp.(responseBody); ok { - buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) - } else { - buf, err = marshaler.Marshal(resp) - } - if err != nil { - grpclog.Infof("Marshal error: %v", err) - HTTPError(ctx, mux, marshaler, w, req, err) - return - } - - if _, err = w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} - -func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { - if len(opts) == 0 { - return nil - } - for _, opt := range opts { - if err := opt(ctx, w, resp); err != nil { - grpclog.Infof("Error handling ForwardResponseOptions: %v", err) - return err - } - } - return nil -} - -func handleForwardResponseStreamError(wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, err error) { - buf, merr := marshaler.Marshal(streamChunk(nil, err)) - if merr != nil { - grpclog.Infof("Failed to marshal an error: %v", merr) - return - } - if !wroteHeader { - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - w.WriteHeader(HTTPStatusFromCode(s.Code())) - } - if _, werr := w.Write(buf); werr != nil { - grpclog.Infof("Failed to notify error to client: %v", werr) - return - } -} - -func streamChunk(result proto.Message, err error) map[string]proto.Message { - if err != nil { - grpcCode := codes.Unknown - grpcMessage := err.Error() - var grpcDetails []*any.Any - if s, ok := status.FromError(err); ok { - grpcCode = s.Code() - grpcMessage = s.Message() - grpcDetails = s.Proto().GetDetails() - } - httpCode := HTTPStatusFromCode(grpcCode) - return map[string]proto.Message{ - "error": &internal.StreamError{ - GrpcCode: int32(grpcCode), - HttpCode: int32(httpCode), - Message: grpcMessage, - HttpStatus: http.StatusText(httpCode), - Details: grpcDetails, - }, - } - } - if result == nil { - return streamChunk(nil, fmt.Errorf("empty response")) - } - return map[string]proto.Message{"result": result} -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go deleted file mode 100644 index f55285b5d6..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go +++ /dev/null @@ -1,43 +0,0 @@ -package runtime - -import ( - 
"google.golang.org/genproto/googleapis/api/httpbody" -) - -// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler -func SetHTTPBodyMarshaler(serveMux *ServeMux) { - serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{ - Marshaler: &JSONPb{OrigName: true}, - } -} - -// HTTPBodyMarshaler is a Marshaler which supports marshaling of a -// google.api.HttpBody message as the full response body if it is -// the actual message used as the response. If not, then this will -// simply fallback to the Marshaler specified as its default Marshaler. -type HTTPBodyMarshaler struct { - Marshaler -} - -// ContentType implementation to keep backwards compatability with marshal interface -func (h *HTTPBodyMarshaler) ContentType() string { - return h.ContentTypeFromMessage(nil) -} - -// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns -// its specified content type otherwise fall back to the default Marshaler. -func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string { - if httpBody, ok := v.(*httpbody.HttpBody); ok { - return httpBody.GetContentType() - } - return h.Marshaler.ContentType() -} - -// Marshal marshals "v" by returning the body bytes if v is a -// google.api.HttpBody message, otherwise it falls back to the default Marshaler. -func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { - if httpBody, ok := v.(*httpbody.HttpBody); ok { - return httpBody.Data, nil - } - return h.Marshaler.Marshal(v) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go deleted file mode 100644 index f9d3a585a4..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go +++ /dev/null @@ -1,45 +0,0 @@ -package runtime - -import ( - "encoding/json" - "io" -) - -// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON -// with the standard "encoding/json" package of Golang. -// Although it is generally faster for simple proto messages than JSONPb, -// it does not support advanced features of protobuf, e.g. map, oneof, .... -// -// The NewEncoder and NewDecoder types return *json.Encoder and -// *json.Decoder respectively. -type JSONBuiltin struct{} - -// ContentType always Returns "application/json". -func (*JSONBuiltin) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { - return json.NewDecoder(r) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. 
-func (j *JSONBuiltin) Delimiter() []byte { - return []byte("\n") -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go deleted file mode 100644 index 3530dddd0a..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go +++ /dev/null @@ -1,242 +0,0 @@ -package runtime - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" -) - -// JSONPb is a Marshaler which marshals/unmarshals into/from JSON -// with the "github.com/golang/protobuf/jsonpb". -// It supports fully functionality of protobuf unlike JSONBuiltin. -// -// The NewDecoder method returns a DecoderWrapper, so the underlying -// *json.Decoder methods can be used. -type JSONPb jsonpb.Marshaler - -// ContentType always returns "application/json". -func (*JSONPb) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON. -func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { - if _, ok := v.(proto.Message); !ok { - return j.marshalNonProtoField(v) - } - - var buf bytes.Buffer - if err := j.marshalTo(&buf, v); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { - p, ok := v.(proto.Message) - if !ok { - buf, err := j.marshalNonProtoField(v) - if err != nil { - return err - } - _, err = w.Write(buf) - return err - } - return (*jsonpb.Marshaler)(j).Marshal(w, p) -} - -var ( - // protoMessageType is stored to prevent constant lookup of the same type at runtime. - protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() -) - -// marshalNonProto marshals a non-message field of a protobuf message. -// This function does not correctly marshals arbitrary data structure into JSON, -// but it is only capable of marshaling non-message field values of protobuf, -// i.e. primitive types, enums; pointers to primitives or enums; maps from -// integer/string types to primitives/enums/pointers to messages. 
-func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { - if v == nil { - return []byte("null"), nil - } - rv := reflect.ValueOf(v) - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return []byte("null"), nil - } - rv = rv.Elem() - } - - if rv.Kind() == reflect.Slice { - if rv.IsNil() { - if j.EmitDefaults { - return []byte("[]"), nil - } - return []byte("null"), nil - } - - if rv.Type().Elem().Implements(protoMessageType) { - var buf bytes.Buffer - err := buf.WriteByte('[') - if err != nil { - return nil, err - } - for i := 0; i < rv.Len(); i++ { - if i != 0 { - err = buf.WriteByte(',') - if err != nil { - return nil, err - } - } - if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { - return nil, err - } - } - err = buf.WriteByte(']') - if err != nil { - return nil, err - } - - return buf.Bytes(), nil - } - } - - if rv.Kind() == reflect.Map { - m := make(map[string]*json.RawMessage) - for _, k := range rv.MapKeys() { - buf, err := j.Marshal(rv.MapIndex(k).Interface()) - if err != nil { - return nil, err - } - m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) - } - if j.Indent != "" { - return json.MarshalIndent(m, "", j.Indent) - } - return json.Marshal(m) - } - if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { - return json.Marshal(enum.String()) - } - return json.Marshal(rv.Interface()) -} - -// Unmarshal unmarshals JSON "data" into "v" -func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { - return unmarshalJSONPb(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JSONPb) NewDecoder(r io.Reader) Decoder { - d := json.NewDecoder(r) - return DecoderWrapper{Decoder: d} -} - -// DecoderWrapper is a wrapper around a *json.Decoder that adds -// support for protos to the Decode method. -type DecoderWrapper struct { - *json.Decoder -} - -// Decode wraps the embedded decoder's Decode method to support -// protos using a jsonpb.Unmarshaler. -func (d DecoderWrapper) Decode(v interface{}) error { - return decodeJSONPb(d.Decoder, v) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". 
-func (j *JSONPb) NewEncoder(w io.Writer) Encoder { - return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) }) -} - -func unmarshalJSONPb(data []byte, v interface{}) error { - d := json.NewDecoder(bytes.NewReader(data)) - return decodeJSONPb(d, v) -} - -func decodeJSONPb(d *json.Decoder, v interface{}) error { - p, ok := v.(proto.Message) - if !ok { - return decodeNonProtoField(d, v) - } - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} - return unmarshaler.UnmarshalNext(d, p) -} - -func decodeNonProtoField(d *json.Decoder, v interface{}) error { - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - return fmt.Errorf("%T is not a pointer", v) - } - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - if rv.Type().ConvertibleTo(typeProtoMessage) { - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} - return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) - } - rv = rv.Elem() - } - if rv.Kind() == reflect.Map { - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - conv, ok := convFromType[rv.Type().Key().Kind()] - if !ok { - return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) - } - - m := make(map[string]*json.RawMessage) - if err := d.Decode(&m); err != nil { - return err - } - for k, v := range m { - result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - bk := result[0] - bv := reflect.New(rv.Type().Elem()) - if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { - return err - } - rv.SetMapIndex(bk, bv.Elem()) - } - return nil - } - if _, ok := rv.Interface().(protoEnum); ok { - var repr interface{} - if err := d.Decode(&repr); err != nil { - return err - } - switch repr.(type) { - case string: - // TODO(yugui) Should use proto.StructProperties? - return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) - case float64: - rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) - return nil - default: - return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) - } - } - return d.Decode(v) -} - -type protoEnum interface { - fmt.Stringer - EnumDescriptor() ([]byte, []int) -} - -var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() - -// Delimiter for newline encoded JSON streams. -func (j *JSONPb) Delimiter() []byte { - return []byte("\n") -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go deleted file mode 100644 index f65d1a2676..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go +++ /dev/null @@ -1,62 +0,0 @@ -package runtime - -import ( - "io" - - "errors" - "github.com/golang/protobuf/proto" - "io/ioutil" -) - -// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes -type ProtoMarshaller struct{} - -// ContentType always returns "application/octet-stream". 
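Since JSONPb is a defined type over jsonpb.Marshaler, its options carry over directly; a sketch with an assumed non-proto value, which is routed through marshalNonProtoField.

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// OrigName and EmitDefaults are the underlying jsonpb.Marshaler fields.
	m := &runtime.JSONPb{OrigName: true, EmitDefaults: true}
	buf, err := m.Marshal(map[string]string{"status": "ok"}) // non-proto value
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf), m.ContentType()) // {"status":"ok"} application/json
}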
-func (*ProtoMarshaller) ContentType() string { - return "application/octet-stream" -} - -// Marshal marshals "value" into Proto -func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) { - message, ok := value.(proto.Message) - if !ok { - return nil, errors.New("unable to marshal non proto field") - } - return proto.Marshal(message) -} - -// Unmarshal unmarshals proto "data" into "value" -func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { - message, ok := value.(proto.Message) - if !ok { - return errors.New("unable to unmarshal non proto field") - } - return proto.Unmarshal(data, message) -} - -// NewDecoder returns a Decoder which reads proto stream from "reader". -func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { - return DecoderFunc(func(value interface{}) error { - buffer, err := ioutil.ReadAll(reader) - if err != nil { - return err - } - return marshaller.Unmarshal(buffer, value) - }) -} - -// NewEncoder returns an Encoder which writes proto stream into "writer". -func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { - return EncoderFunc(func(value interface{}) error { - buffer, err := marshaller.Marshal(value) - if err != nil { - return err - } - _, err = writer.Write(buffer) - if err != nil { - return err - } - - return nil - }) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go deleted file mode 100644 index 98fe6e88ac..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go +++ /dev/null @@ -1,48 +0,0 @@ -package runtime - -import ( - "io" -) - -// Marshaler defines a conversion between byte sequence and gRPC payloads / fields. -type Marshaler interface { - // Marshal marshals "v" into byte sequence. - Marshal(v interface{}) ([]byte, error) - // Unmarshal unmarshals "data" into "v". - // "v" must be a pointer value. - Unmarshal(data []byte, v interface{}) error - // NewDecoder returns a Decoder which reads byte sequence from "r". - NewDecoder(r io.Reader) Decoder - // NewEncoder returns an Encoder which writes bytes sequence into "w". - NewEncoder(w io.Writer) Encoder - // ContentType returns the Content-Type which this marshaler is responsible for. - ContentType() string -} - -// Decoder decodes a byte sequence -type Decoder interface { - Decode(v interface{}) error -} - -// Encoder encodes gRPC payloads / fields into byte sequence. -type Encoder interface { - Encode(v interface{}) error -} - -// DecoderFunc adapts an decoder function into Decoder. -type DecoderFunc func(v interface{}) error - -// Decode delegates invocations to the underlying function itself. -func (f DecoderFunc) Decode(v interface{}) error { return f(v) } - -// EncoderFunc adapts an encoder function into Encoder -type EncoderFunc func(v interface{}) error - -// Encode delegates invocations to the underlying function itself. -func (f EncoderFunc) Encode(v interface{}) error { return f(v) } - -// Delimited defines the streaming delimiter. -type Delimited interface { - // Delimiter returns the record seperator for the stream. 
- Delimiter() []byte -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go deleted file mode 100644 index 5cc53ae4f6..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go +++ /dev/null @@ -1,91 +0,0 @@ -package runtime - -import ( - "errors" - "net/http" -) - -// MIMEWildcard is the fallback MIME type used for requests which do not match -// a registered MIME type. -const MIMEWildcard = "*" - -var ( - acceptHeader = http.CanonicalHeaderKey("Accept") - contentTypeHeader = http.CanonicalHeaderKey("Content-Type") - - defaultMarshaler = &JSONPb{OrigName: true} -) - -// MarshalerForRequest returns the inbound/outbound marshalers for this request. -// It checks the registry on the ServeMux for the MIME type set by the Content-Type header. -// If it isn't set (or the request Content-Type is empty), checks for "*". -// If there are multiple Content-Type headers set, choose the first one that it can -// exactly match in the registry. -// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler. -func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) { - for _, acceptVal := range r.Header[acceptHeader] { - if m, ok := mux.marshalers.mimeMap[acceptVal]; ok { - outbound = m - break - } - } - - for _, contentTypeVal := range r.Header[contentTypeHeader] { - if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok { - inbound = m - break - } - } - - if inbound == nil { - inbound = mux.marshalers.mimeMap[MIMEWildcard] - } - if outbound == nil { - outbound = inbound - } - - return inbound, outbound -} - -// marshalerRegistry is a mapping from MIME types to Marshalers. -type marshalerRegistry struct { - mimeMap map[string]Marshaler -} - -// add adds a marshaler for a case-sensitive MIME type string ("*" to match any -// MIME type). -func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { - if len(mime) == 0 { - return errors.New("empty MIME type") - } - - m.mimeMap[mime] = marshaler - - return nil -} - -// makeMarshalerMIMERegistry returns a new registry of marshalers. -// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. -// -// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. -// "*" can be used to match any Content-Type. -// This can be attached to a ServerMux with the marshaler option. -func makeMarshalerMIMERegistry() marshalerRegistry { - return marshalerRegistry{ - mimeMap: map[string]Marshaler{ - MIMEWildcard: defaultMarshaler, - }, - } -} - -// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound -// Marshalers to a MIME type in mux. 
-func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption { - return func(mux *ServeMux) { - if err := mux.marshalers.add(mime, marshaler); err != nil { - panic(err) - } - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go deleted file mode 100644 index ec81e55b5e..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ /dev/null @@ -1,268 +0,0 @@ -package runtime - -import ( - "context" - "fmt" - "net/http" - "net/textproto" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// A HandlerFunc handles a specific pair of path pattern and HTTP method. -type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) - -// ServeMux is a request multiplexer for grpc-gateway. -// It matches http requests to patterns and invokes the corresponding handler. -type ServeMux struct { - // handlers maps HTTP method to a list of handlers. - handlers map[string][]handler - forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error - marshalers marshalerRegistry - incomingHeaderMatcher HeaderMatcherFunc - outgoingHeaderMatcher HeaderMatcherFunc - metadataAnnotators []func(context.Context, *http.Request) metadata.MD - protoErrorHandler ProtoErrorHandlerFunc - disablePathLengthFallback bool -} - -// ServeMuxOption is an option that can be given to a ServeMux on construction. -type ServeMuxOption func(*ServeMux) - -// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. -// -// forwardResponseOption is an option that will be called on the relevant context.Context, -// http.ResponseWriter, and proto.Message before every forwarded response. -// -// The message may be nil in the case where just a header is being sent. -func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) - } -} - -// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. -type HeaderMatcherFunc func(string) (string, bool) - -// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header -// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with -// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. -func DefaultHeaderMatcher(key string) (string, bool) { - key = textproto.CanonicalMIMEHeaderKey(key) - if isPermanentHTTPHeader(key) { - return MetadataPrefix + key, true - } else if strings.HasPrefix(key, MetadataHeaderPrefix) { - return key[len(MetadataHeaderPrefix):], true - } - return "", false -} - -// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. -// -// This matcher will be called with each header in http.Request. If matcher returns true, that header will be -// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. 
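A sketch of wiring marshalers into the registry above; the "application/json+strict" MIME type is a made-up example, and the wildcard entry mirrors the default JSONPb registration.

package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Exact Content-Type/Accept matches win; anything else uses the "*" entry.
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption("application/json+strict", &runtime.JSONBuiltin{}),
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{OrigName: true}),
	)
	_ = mux // register generated handlers and serve in a real gateway
}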
-func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { - return func(mux *ServeMux) { - mux.incomingHeaderMatcher = fn - } -} - -// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. -// -// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be -// passed to http response returned from gateway. To transform the header before passing to response, -// matcher should return modified header. -func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { - return func(mux *ServeMux) { - mux.outgoingHeaderMatcher = fn - } -} - -// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. -// -// This can be used by services that need to read from http.Request and modify gRPC context. A common use case -// is reading token from cookie and adding it in gRPC context. -func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator) - } -} - -// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context. -// -// This can be used to handle an error as general proto message defined by gRPC. -// The response including body and status is not backward compatible with the default error handler. -// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization. -func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.protoErrorHandler = fn - } -} - -// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. -func WithDisablePathLengthFallback() ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.disablePathLengthFallback = true - } -} - -// NewServeMux returns a new ServeMux whose internal mapping is empty. -func NewServeMux(opts ...ServeMuxOption) *ServeMux { - serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - } - - for _, opt := range opts { - opt(serveMux) - } - - if serveMux.protoErrorHandler != nil { - HTTPError = serveMux.protoErrorHandler - // OtherErrorHandler is no longer used when protoErrorHandler is set. - // Overwritten by a special error handler to return Unknown. - OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) { - ctx := context.Background() - _, outboundMarshaler := MarshalerForRequest(serveMux, r) - sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler") - serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr) - } - } - - if serveMux.incomingHeaderMatcher == nil { - serveMux.incomingHeaderMatcher = DefaultHeaderMatcher - } - - if serveMux.outgoingHeaderMatcher == nil { - serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { - return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true - } - } - - return serveMux -} - -// Handle associates "h" to the pair of HTTP method and path pattern. -func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { - s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) -} - -// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
-func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - path := r.URL.Path - if !strings.HasPrefix(path, "/") { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) - } - return - } - - components := strings.Split(path[1:], "/") - l := len(components) - var verb string - if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } - return - } else if idx > 0 { - c := components[l-1] - components[l-1], verb = c[:idx], c[idx+1:] - } - - if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { - r.Method = strings.ToUpper(override) - if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } - return - } - } - for _, h := range s.handlers[r.Method] { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - h.h(w, r, pathParams) - return - } - - // lookup other methods to handle fallback from GET to POST and - // to determine if it is MethodNotAllowed or NotFound. - for m, handlers := range s.handlers { - if m == r.Method { - continue - } - for _, h := range handlers { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - // X-HTTP-Method-Override is optional. Always allow fallback to POST. - if s.isPathLengthFallback(r) { - if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } - return - } - h.h(w, r, pathParams) - return - } - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) - } - return - } - } - - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } -} - -// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
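A hedged sketch of the header-matcher options above: forward one assumed header (X-Request-Id) into gRPC metadata and defer everything else to DefaultHeaderMatcher.

package main

import (
	"net/http"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	mux := runtime.NewServeMux(
		runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
			if strings.EqualFold(key, "X-Request-Id") {
				return "x-request-id", true // pass through under this metadata key
			}
			return runtime.DefaultHeaderMatcher(key) // permanent headers and Grpc-Metadata-*
		}),
	)
	http.Handle("/", mux) // ...then register generated handlers and ListenAndServe
}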
-func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { - return s.forwardResponseOptions -} - -func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { - return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" -} - -type handler struct { - pat Pattern - h HandlerFunc -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go deleted file mode 100644 index f16a84ad38..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go +++ /dev/null @@ -1,227 +0,0 @@ -package runtime - -import ( - "errors" - "fmt" - "strings" - - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc/grpclog" -) - -var ( - // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. - ErrNotMatch = errors.New("not match to the path pattern") - // ErrInvalidPattern indicates that the given definition of Pattern is not valid. - ErrInvalidPattern = errors.New("invalid pattern") -) - -type op struct { - code utilities.OpCode - operand int -} - -// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. -type Pattern struct { - // ops is a list of operations - ops []op - // pool is a constant pool indexed by the operands or vars. - pool []string - // vars is a list of variables names to be bound by this pattern - vars []string - // stacksize is the max depth of the stack - stacksize int - // tailLen is the length of the fixed-size segments after a deep wildcard - tailLen int - // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. - verb string -} - -// NewPattern returns a new Pattern from the given definition values. -// "ops" is a sequence of op codes. "pool" is a constant pool. -// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. -// "version" must be 1 for now. -// It returns an error if the given definition is invalid. 
-func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { - if version != 1 { - grpclog.Infof("unsupported version: %d", version) - return Pattern{}, ErrInvalidPattern - } - - l := len(ops) - if l%2 != 0 { - grpclog.Infof("odd number of ops codes: %d", l) - return Pattern{}, ErrInvalidPattern - } - - var ( - typedOps []op - stack, maxstack int - tailLen int - pushMSeen bool - vars []string - ) - for i := 0; i < l; i += 2 { - op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush: - if pushMSeen { - tailLen++ - } - stack++ - case utilities.OpPushM: - if pushMSeen { - grpclog.Infof("pushM appears twice") - return Pattern{}, ErrInvalidPattern - } - pushMSeen = true - stack++ - case utilities.OpLitPush: - if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("negative literal index: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - if pushMSeen { - tailLen++ - } - stack++ - case utilities.OpConcatN: - if op.operand <= 0 { - grpclog.Infof("negative concat size: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - stack -= op.operand - if stack < 0 { - grpclog.Print("stack underflow") - return Pattern{}, ErrInvalidPattern - } - stack++ - case utilities.OpCapture: - if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("variable name index out of bound: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - v := pool[op.operand] - op.operand = len(vars) - vars = append(vars, v) - stack-- - if stack < 0 { - grpclog.Infof("stack underflow") - return Pattern{}, ErrInvalidPattern - } - default: - grpclog.Infof("invalid opcode: %d", op.code) - return Pattern{}, ErrInvalidPattern - } - - if maxstack < stack { - maxstack = stack - } - typedOps = append(typedOps, op) - } - return Pattern{ - ops: typedOps, - pool: pool, - vars: vars, - stacksize: maxstack, - tailLen: tailLen, - verb: verb, - }, nil -} - -// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. -func MustPattern(p Pattern, err error) Pattern { - if err != nil { - grpclog.Fatalf("Pattern initialization failed: %v", err) - } - return p -} - -// Match examines components if it matches to the Pattern. -// If it matches, the function returns a mapping from field paths to their captured values. -// If otherwise, the function returns an error. 
-func (p Pattern) Match(components []string, verb string) (map[string]string, error) { - if p.verb != verb { - return nil, ErrNotMatch - } - - var pos int - stack := make([]string, 0, p.stacksize) - captured := make([]string, len(p.vars)) - l := len(components) - for _, op := range p.ops { - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush, utilities.OpLitPush: - if pos >= l { - return nil, ErrNotMatch - } - c := components[pos] - if op.code == utilities.OpLitPush { - if lit := p.pool[op.operand]; c != lit { - return nil, ErrNotMatch - } - } - stack = append(stack, c) - pos++ - case utilities.OpPushM: - end := len(components) - if end < pos+p.tailLen { - return nil, ErrNotMatch - } - end -= p.tailLen - stack = append(stack, strings.Join(components[pos:end], "/")) - pos = end - case utilities.OpConcatN: - n := op.operand - l := len(stack) - n - stack = append(stack[:l], strings.Join(stack[l:], "/")) - case utilities.OpCapture: - n := len(stack) - 1 - captured[op.operand] = stack[n] - stack = stack[:n] - } - } - if pos < l { - return nil, ErrNotMatch - } - bindings := make(map[string]string) - for i, val := range captured { - bindings[p.vars[i]] = val - } - return bindings, nil -} - -// Verb returns the verb part of the Pattern. -func (p Pattern) Verb() string { return p.verb } - -func (p Pattern) String() string { - var stack []string - for _, op := range p.ops { - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush: - stack = append(stack, "*") - case utilities.OpLitPush: - stack = append(stack, p.pool[op.operand]) - case utilities.OpPushM: - stack = append(stack, "**") - case utilities.OpConcatN: - n := op.operand - l := len(stack) - n - stack = append(stack[:l], strings.Join(stack[l:], "/")) - case utilities.OpCapture: - n := len(stack) - 1 - stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) - } - } - segs := strings.Join(stack, "/") - if p.verb != "" { - return fmt.Sprintf("/%s:%s", segs, p.verb) - } - return "/" + segs -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go deleted file mode 100644 index a3151e2a55..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go +++ /dev/null @@ -1,80 +0,0 @@ -package runtime - -import ( - "github.com/golang/protobuf/proto" -) - -// StringP returns a pointer to a string whose pointee is same as the given string value. -func StringP(val string) (*string, error) { - return proto.String(val), nil -} - -// BoolP parses the given string representation of a boolean value, -// and returns a pointer to a bool whose value is same as the parsed value. -func BoolP(val string) (*bool, error) { - b, err := Bool(val) - if err != nil { - return nil, err - } - return proto.Bool(b), nil -} - -// Float64P parses the given string representation of a floating point number, -// and returns a pointer to a float64 whose value is same as the parsed number. -func Float64P(val string) (*float64, error) { - f, err := Float64(val) - if err != nil { - return nil, err - } - return proto.Float64(f), nil -} - -// Float32P parses the given string representation of a floating point number, -// and returns a pointer to a float32 whose value is same as the parsed number. 
-func Float32P(val string) (*float32, error) { - f, err := Float32(val) - if err != nil { - return nil, err - } - return proto.Float32(f), nil -} - -// Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. -func Int64P(val string) (*int64, error) { - i, err := Int64(val) - if err != nil { - return nil, err - } - return proto.Int64(i), nil -} - -// Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. -func Int32P(val string) (*int32, error) { - i, err := Int32(val) - if err != nil { - return nil, err - } - return proto.Int32(i), err -} - -// Uint64P parses the given string representation of an integer -// and returns a pointer to a uint64 whose value is same as the parsed integer. -func Uint64P(val string) (*uint64, error) { - i, err := Uint64(val) - if err != nil { - return nil, err - } - return proto.Uint64(i), err -} - -// Uint32P parses the given string representation of an integer -// and returns a pointer to a uint32 whose value is same as the parsed integer. -func Uint32P(val string) (*uint32, error) { - i, err := Uint32(val) - if err != nil { - return nil, err - } - return proto.Uint32(i), err -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go deleted file mode 100644 index b7fa32e45d..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go +++ /dev/null @@ -1,70 +0,0 @@ -package runtime - -import ( - "io" - "net/http" - - "context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. -type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) - -var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler - -// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a Status message marshaled by a Marshaler. -// -// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
-func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { - // return Internal when Marshal failed - const fallback = `{"code": 13, "message": "failed to marshal error message"}` - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - w.Header().Del("Trailer") - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatability - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { - pb := s.Proto() - contentType = httpBodyMarshaler.ContentTypeFromMessage(pb) - } - w.Header().Set("Content-Type", contentType) - - buf, merr := marshaler.Marshal(s.Proto()) - if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go deleted file mode 100644 index bb9359f17c..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go +++ /dev/null @@ -1,392 +0,0 @@ -package runtime - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc/grpclog" -) - -// PopulateQueryParameters populates "values" into "msg". -// A value is ignored if its key starts with one of the elements in "filter". -func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { - for key, values := range values { - re, err := regexp.Compile("^(.*)\\[(.*)\\]$") - if err != nil { - return err - } - match := re.FindStringSubmatch(key) - if len(match) == 3 { - key = match[1] - values = append([]string{match[2]}, values...) - } - fieldPath := strings.Split(key, ".") - if filter.HasCommonPrefix(fieldPath) { - continue - } - if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { - return err - } - } - return nil -} - -// PopulateFieldFromPath sets a value in a nested Protobuf structure. -// It instantiates missing protobuf fields as it goes. 
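A minimal sketch of opting into proto-shaped error bodies via the option above; as noted in NewServeMux, this also rewires HTTPError and OtherErrorHandler at construction time.

package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Errors are then marshaled as google.rpc.Status instead of the legacy errorBody.
	mux := runtime.NewServeMux(
		runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
	)
	_ = mux
}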
-func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { - fieldPath := strings.Split(fieldPathString, ".") - return populateFieldValueFromPath(msg, fieldPath, []string{value}) -} - -func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { - m := reflect.ValueOf(msg) - if m.Kind() != reflect.Ptr { - return fmt.Errorf("unexpected type %T: %v", msg, msg) - } - var props *proto.Properties - m = m.Elem() - for i, fieldName := range fieldPath { - isLast := i == len(fieldPath)-1 - if !isLast && m.Kind() != reflect.Struct { - return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) - } - var f reflect.Value - var err error - f, props, err = fieldByProtoName(m, fieldName) - if err != nil { - return err - } else if !f.IsValid() { - grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) - return nil - } - - switch f.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - m = f - case reflect.Slice: - if !isLast { - return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) - } - // Handle []byte - if f.Type().Elem().Kind() == reflect.Uint8 { - m = f - break - } - return populateRepeatedField(f, values, props) - case reflect.Ptr: - if f.IsNil() { - m = reflect.New(f.Type().Elem()) - f.Set(m.Convert(f.Type())) - } - m = f.Elem() - continue - case reflect.Struct: - m = f - continue - case reflect.Map: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - return populateMapField(f, values, props) - default: - return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) - } - } - switch len(values) { - case 0: - return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) - case 1: - default: - grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) - } - return populateField(m, values[0], props) -} - -// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". -// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
-func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { - props := proto.GetProperties(m.Type()) - - // look up field name in oneof map - if op, ok := props.OneofTypes[name]; ok { - v := reflect.New(op.Type.Elem()) - field := m.Field(op.Field) - if !field.IsNil() { - return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) - } - field.Set(v) - return v.Elem().Field(0), op.Prop, nil - } - - for _, p := range props.Prop { - if p.OrigName == name { - return m.FieldByName(p.Name), p, nil - } - if p.JSONName == name { - return m.FieldByName(p.Name), p, nil - } - } - return reflect.Value{}, nil, nil -} - -func populateMapField(f reflect.Value, values []string, props *proto.Properties) error { - if len(values) != 2 { - return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name) - } - - key, value := values[0], values[1] - keyType := f.Type().Key() - valueType := f.Type().Elem() - if f.IsNil() { - f.Set(reflect.MakeMap(f.Type())) - } - - keyConv, ok := convFromType[keyType.Kind()] - if !ok { - return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name) - } - valueConv, ok := convFromType[valueType.Kind()] - if !ok { - return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name) - } - - keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)}) - if err := keyV[1].Interface(); err != nil { - return err.(error) - } - valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := valueV[1].Interface(); err != nil { - return err.(error) - } - - f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType)) - - return nil -} - -func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { - elemType := f.Type().Elem() - - // is the destination field a slice of an enumeration type? 
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnumRepeated(f, values, enumValMap) - } - - conv, ok := convFromType[elemType.Kind()] - if !ok { - return fmt.Errorf("unsupported field type %s", elemType) - } - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Index(i).Set(result[0].Convert(f.Index(i).Type())) - } - return nil -} - -func populateField(f reflect.Value, value string, props *proto.Properties) error { - i := f.Addr().Interface() - - // Handle protobuf well known types - type wkt interface { - XXX_WellKnownType() string - } - if wkt, ok := i.(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "Timestamp": - if value == "null" { - f.Field(0).SetInt(0) - f.Field(1).SetInt(0) - return nil - } - - t, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - f.Field(0).SetInt(int64(t.Unix())) - f.Field(1).SetInt(int64(t.Nanosecond())) - return nil - case "Duration": - if value == "null" { - f.Field(0).SetInt(0) - f.Field(1).SetInt(0) - return nil - } - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - f.Field(0).SetInt(s) - f.Field(1).SetInt(ns) - return nil - case "DoubleValue": - fallthrough - case "FloatValue": - float64Val, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetFloat(float64Val) - return nil - case "Int64Value": - fallthrough - case "Int32Value": - int64Val, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetInt(int64Val) - return nil - case "UInt64Value": - fallthrough - case "UInt32Value": - uint64Val, err := strconv.ParseUint(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetUint(uint64Val) - return nil - case "BoolValue": - if value == "true" { - f.Field(0).SetBool(true) - } else if value == "false" { - f.Field(0).SetBool(false) - } else { - return fmt.Errorf("bad BoolValue: %s", value) - } - return nil - case "StringValue": - f.Field(0).SetString(value) - return nil - case "BytesValue": - bytesVal, err := base64.StdEncoding.DecodeString(value) - if err != nil { - return fmt.Errorf("bad BytesValue: %s", value) - } - f.Field(0).SetBytes(bytesVal) - return nil - } - } - - // Handle google well known types - if gwkt, ok := i.(proto.Message); ok { - switch proto.MessageName(gwkt) { - case "google.protobuf.FieldMask": - p := f.Field(0) - for _, v := range strings.Split(value, ",") { - if v != "" { - p.Set(reflect.Append(p, reflect.ValueOf(v))) - } - } - return nil - } - } - - // Handle Time and Duration stdlib types - switch t := i.(type) { - case *time.Time: - pt, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - *t = pt - return nil - case *time.Duration: - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - *t = d - return nil - } - - // is the destination field an enumeration type? 
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnum(f, value, enumValMap) - } - - conv, ok := convFromType[f.Kind()] - if !ok { - return fmt.Errorf("field type %T is not supported in query parameters", i) - } - result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Set(result[0].Convert(f.Type())) - return nil -} - -func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { - // see if it's an enumeration string - if enumVal, ok := enumValMap[value]; ok { - return reflect.ValueOf(enumVal).Convert(t), nil - } - - // check for an integer that matches an enumeration value - eVal, err := strconv.Atoi(value) - if err != nil { - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) - } - for _, v := range enumValMap { - if v == int32(eVal) { - return reflect.ValueOf(eVal).Convert(t), nil - } - } - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) -} - -func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { - cval, err := convertEnum(value, f.Type(), enumValMap) - if err != nil { - return err - } - f.Set(cval) - return nil -} - -func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { - elemType := f.Type().Elem() - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result, err := convertEnum(v, elemType, enumValMap) - if err != nil { - return err - } - f.Index(i).Set(result) - } - return nil -} - -var ( - convFromType = map[reflect.Kind]reflect.Value{ - reflect.String: reflect.ValueOf(String), - reflect.Bool: reflect.ValueOf(Bool), - reflect.Float64: reflect.ValueOf(Float64), - reflect.Float32: reflect.ValueOf(Float32), - reflect.Int64: reflect.ValueOf(Int64), - reflect.Int32: reflect.ValueOf(Int32), - reflect.Uint64: reflect.ValueOf(Uint64), - reflect.Uint32: reflect.ValueOf(Uint32), - reflect.Slice: reflect.ValueOf(Bytes), - } -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel deleted file mode 100644 index 7109d79323..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "pattern.go", - "readerfactory.go", - "trie.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", -) - -go_test( - name = "go_default_test", - size = "small", - srcs = ["trie_test.go"], - embed = [":go_default_library"], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go deleted file mode 100644 index cf79a4d588..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package utilities provides members for internal use in grpc-gateway. 
-package utilities diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go deleted file mode 100644 index dfe7de4864..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go +++ /dev/null @@ -1,22 +0,0 @@ -package utilities - -// An OpCode is a opcode of compiled path patterns. -type OpCode int - -// These constants are the valid values of OpCode. -const ( - // OpNop does nothing - OpNop = OpCode(iota) - // OpPush pushes a component to stack - OpPush - // OpLitPush pushes a component to stack if it matches to the literal - OpLitPush - // OpPushM concatenates the remaining components and pushes it to stack - OpPushM - // OpConcatN pops N items from stack, concatenates them and pushes it back to stack - OpConcatN - // OpCapture pops an item and binds it to the variable - OpCapture - // OpEnd is the least positive invalid opcode. - OpEnd -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go deleted file mode 100644 index 6dd3854665..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go +++ /dev/null @@ -1,20 +0,0 @@ -package utilities - -import ( - "bytes" - "io" - "io/ioutil" -) - -// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins -// at the start of the stream -func IOReaderFactory(r io.Reader) (func() io.Reader, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - return func() io.Reader { - return bytes.NewReader(b) - }, nil -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go deleted file mode 100644 index c2b7b30dd9..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go +++ /dev/null @@ -1,177 +0,0 @@ -package utilities - -import ( - "sort" -) - -// DoubleArray is a Double Array implementation of trie on sequences of strings. -type DoubleArray struct { - // Encoding keeps an encoding from string to int - Encoding map[string]int - // Base is the base array of Double Array - Base []int - // Check is the check array of Double Array - Check []int -} - -// NewDoubleArray builds a DoubleArray from a set of sequences of strings. 
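For reference, the trie.go deleted just below implements the DoubleArray used as the query-parameter filter above. A small, hedged illustration of its observable behaviour, with arbitrarily chosen sequences:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	// Register the field paths that should be filtered out.
	da := utilities.NewDoubleArray([][]string{
		{"parent", "name"},
		{"id"},
	})
	fmt.Println(da.HasCommonPrefix([]string{"parent", "name", "child"})) // true: {"parent","name"} is a prefix
	fmt.Println(da.HasCommonPrefix([]string{"name"}))                    // false: no registered sequence is a prefix
}
```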
-func NewDoubleArray(seqs [][]string) *DoubleArray { - da := &DoubleArray{Encoding: make(map[string]int)} - if len(seqs) == 0 { - return da - } - - encoded := registerTokens(da, seqs) - sort.Sort(byLex(encoded)) - - root := node{row: -1, col: -1, left: 0, right: len(encoded)} - addSeqs(da, encoded, 0, root) - - for i := len(da.Base); i > 0; i-- { - if da.Check[i-1] != 0 { - da.Base = da.Base[:i] - da.Check = da.Check[:i] - break - } - } - return da -} - -func registerTokens(da *DoubleArray, seqs [][]string) [][]int { - var result [][]int - for _, seq := range seqs { - var encoded []int - for _, token := range seq { - if _, ok := da.Encoding[token]; !ok { - da.Encoding[token] = len(da.Encoding) - } - encoded = append(encoded, da.Encoding[token]) - } - result = append(result, encoded) - } - for i := range result { - result[i] = append(result[i], len(da.Encoding)) - } - return result -} - -type node struct { - row, col int - left, right int -} - -func (n node) value(seqs [][]int) int { - return seqs[n.row][n.col] -} - -func (n node) children(seqs [][]int) []*node { - var result []*node - lastVal := int(-1) - last := new(node) - for i := n.left; i < n.right; i++ { - if lastVal == seqs[i][n.col+1] { - continue - } - last.right = i - last = &node{ - row: i, - col: n.col + 1, - left: i, - } - result = append(result, last) - } - last.right = n.right - return result -} - -func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { - ensureSize(da, pos) - - children := n.children(seqs) - var i int - for i = 1; ; i++ { - ok := func() bool { - for _, child := range children { - code := child.value(seqs) - j := i + code - ensureSize(da, j) - if da.Check[j] != 0 { - return false - } - } - return true - }() - if ok { - break - } - } - da.Base[pos] = i - for _, child := range children { - code := child.value(seqs) - j := i + code - da.Check[j] = pos + 1 - } - terminator := len(da.Encoding) - for _, child := range children { - code := child.value(seqs) - if code == terminator { - continue - } - j := i + code - addSeqs(da, seqs, j, *child) - } -} - -func ensureSize(da *DoubleArray, i int) { - for i >= len(da.Base) { - da.Base = append(da.Base, make([]int, len(da.Base)+1)...) - da.Check = append(da.Check, make([]int, len(da.Check)+1)...) - } -} - -type byLex [][]int - -func (l byLex) Len() int { return len(l) } -func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l byLex) Less(i, j int) bool { - si := l[i] - sj := l[j] - var k int - for k = 0; k < len(si) && k < len(sj); k++ { - if si[k] < sj[k] { - return true - } - if si[k] > sj[k] { - return false - } - } - if k < len(sj) { - return true - } - return false -} - -// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. -func (da *DoubleArray) HasCommonPrefix(seq []string) bool { - if len(da.Base) == 0 { - return false - } - - var i int - for _, t := range seq { - code, ok := da.Encoding[t] - if !ok { - break - } - j := da.Base[i] + code - if len(da.Check) <= j || da.Check[j] != i+1 { - break - } - i = j - } - j := da.Base[i] + len(da.Encoding) - if len(da.Check) <= j || da.Check[j] != i+1 { - return false - } - return true -} diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go index 1cbe04b7d0..c8d9b0a230 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -40,35 +40,31 @@ func (c *Cache) Purge() { // Add adds a value to the cache. 
Returns true if an eviction occurred. func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() - evicted = c.lru.Add(key, value) - c.lock.Unlock() - return evicted + defer c.lock.Unlock() + return c.lru.Add(key, value) } // Get looks up a key's value from the cache. func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() - value, ok = c.lru.Get(key) - c.lock.Unlock() - return value, ok + defer c.lock.Unlock() + return c.lru.Get(key) } // Contains checks if a key is in the cache, without updating the // recent-ness or deleting it for being stale. func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() - containKey := c.lru.Contains(key) - c.lock.RUnlock() - return containKey + defer c.lock.RUnlock() + return c.lru.Contains(key) } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() - value, ok = c.lru.Peek(key) - c.lock.RUnlock() - return value, ok + defer c.lock.RUnlock() + return c.lru.Peek(key) } // ContainsOrAdd checks if a key is in the cache without updating the @@ -102,15 +98,13 @@ func (c *Cache) RemoveOldest() { // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *Cache) Keys() []interface{} { c.lock.RLock() - keys := c.lru.Keys() - c.lock.RUnlock() - return keys + defer c.lock.RUnlock() + return c.lru.Keys() } // Len returns the number of items in the cache. func (c *Cache) Len() int { c.lock.RLock() - length := c.lru.Len() - c.lock.RUnlock() - return length + defer c.lock.RUnlock() + return c.lru.Len() } diff --git a/vendor/github.com/markbates/inflect/.travis.yml b/vendor/github.com/markbates/inflect/.travis.yml deleted file mode 100644 index cf1d2c7d42..0000000000 --- a/vendor/github.com/markbates/inflect/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -language: go - -sudo: false - -matrix: - include: - - go: "1.9.x" - - go: "1.10.x" - - go: "1.11.x" - env: - - GO111MODULE=off - - go: "1.11.x" - env: - - GO111MODULE=on - - go: "tip" - env: - - GO111MODULE=off - - go: "tip" - env: - - GO111MODULE=on - allow_failures: - - go: "tip" - -install: make deps - -script: make ci-test diff --git a/vendor/github.com/markbates/inflect/README.md b/vendor/github.com/markbates/inflect/README.md deleted file mode 100644 index 9b62b1d9a4..0000000000 --- a/vendor/github.com/markbates/inflect/README.md +++ /dev/null @@ -1,214 +0,0 @@ -[![Build Status](https://travis-ci.org/markbates/inflect.svg?branch=master)](https://travis-ci.org/markbates/inflect) - -#### INSTALLATION - -go get github.com/markbates/inflect - -#### PACKAGE -package inflect - - -#### FUNCTIONS -```go -func AddAcronym(word string) -func AddHuman(suffix, replacement string) -func AddIrregular(singular, plural string) -func AddPlural(suffix, replacement string) -func AddSingular(suffix, replacement string) -func AddUncountable(word string) -func Asciify(word string) string -func Camelize(word string) string -func CamelizeDownFirst(word string) string -func Capitalize(word string) string -func Dasherize(word string) string -func ForeignKey(word string) string -func ForeignKeyCondensed(word string) string -func Humanize(word string) string -func Ordinalize(word string) string -func Parameterize(word string) string -func ParameterizeJoin(word, sep string) string -func Pluralize(word string) string -func Singularize(word string) string -func Tableize(word string) string -func 
Titleize(word string) string -func Typeify(word string) string -func Uncountables() map[string]bool -func Underscore(word string) string -``` - -#### TYPES -```go -type Rule struct { - // contains filtered or unexported fields -} -``` - -used by rulesets - -```go -type Ruleset struct { - // contains filtered or unexported fields -} -``` - -a Ruleset is the config of pluralization rules -you can extend the rules with the Add* methods - -``` -func NewDefaultRuleset() *Ruleset -``` -create a new ruleset and load it with the default -set of common English pluralization rules - -``` -func NewRuleset() *Ruleset -``` - -create a blank ruleset. Unless you are going to -build your own rules from scratch you probably -won't need this and can just use the defaultRuleset -via the global inflect.* methods - -``` -func (rs *Ruleset) AddAcronym(word string) -``` -if you use acronym you may need to add them to the ruleset -to prevent Underscored words of things like "HTML" coming out -as "h_t_m_l" - -``` -func (rs *Ruleset) AddHuman(suffix, replacement string) -``` - -Human rules are applied by humanize to show more friendly -versions of words - -``` -func (rs *Ruleset) AddIrregular(singular, plural string) -``` - -Add any inconsistent pluralizing/singularizing rules -to the set here. - -``` -func (rs *Ruleset) AddPlural(suffix, replacement string) -``` - -add a pluralization rule - -``` -func (rs *Ruleset) AddPluralExact(suffix, replacement string, exact bool) -``` - -add a pluralization rule with full string match - -``` -func (rs *Ruleset) AddSingular(suffix, replacement string) -``` - -add a singular rule - -``` -func (rs *Ruleset) AddSingularExact(suffix, replacement string, exact bool) -``` -same as AddSingular but you can set `exact` to force -a full string match - -``` -func (rs *Ruleset) AddUncountable(word string) -``` -add a word to this ruleset that has the same singular and plural form -for example: "rice" - -``` -func (rs *Ruleset) Asciify(word string) string -``` -transforms Latin characters like é -> e - -``` -func (rs *Ruleset) Camelize(word string) string -``` -"dino_party" -> "DinoParty" - -``` -func (rs *Ruleset) CamelizeDownFirst(word string) string -``` -same as Camelcase but with first letter downcased - -``` -func (rs *Ruleset) Capitalize(word string) string -``` -uppercase first character - -``` -func (rs *Ruleset) Dasherize(word string) string -``` -"SomeText" -> "some-text" - -``` -func (rs *Ruleset) ForeignKey(word string) string -``` -an underscored foreign key name "Person" -> "person_id" - -``` -func (rs *Ruleset) ForeignKeyCondensed(word string) string -``` -a foreign key (with an underscore) "Person" -> "personid" - -``` -func (rs *Ruleset) Humanize(word string) string -``` -First letter of sentence capitalized -Uses custom friendly replacements via AddHuman() - -``` -func (rs *Ruleset) Ordinalize(str string) string -``` -"1031" -> "1031st" - -``` -func (rs *Ruleset) Parameterize(word string) string -``` -param safe dasherized names like "my-param" - -``` -func (rs *Ruleset) ParameterizeJoin(word, sep string) string -``` -param safe dasherized names with custom separator - -``` -func (rs *Ruleset) Pluralize(word string) string -``` -returns the plural form of a singular word - -``` -func (rs *Ruleset) Singularize(word string) string -``` -returns the singular form of a plural word - -``` -func (rs *Ruleset) Tableize(word string) string -``` -Rails style pluralized table names: "SuperPerson" -> "super_people" - -``` -func (rs *Ruleset) Titleize(word string) string -``` 
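Before the per-method descriptions that continue below, a short illustrative example of the package-level helpers this README lists (inputs chosen arbitrarily, outputs as documented):

```go
package main

import (
	"fmt"

	"github.com/markbates/inflect"
)

func main() {
	fmt.Println(inflect.Pluralize("machine"))           // "machines"
	fmt.Println(inflect.Singularize("statuses"))        // "status"
	fmt.Println(inflect.Camelize("open_stack_cluster")) // "OpenStackCluster"
	fmt.Println(inflect.Underscore("OpenStackCluster")) // "open_stack_cluster"
	fmt.Println(inflect.Titleize("hello there"))        // "Hello There"

	// Irregular forms can be registered on the global default ruleset.
	inflect.AddIrregular("octopus", "octopodes")
	fmt.Println(inflect.Pluralize("octopus")) // "octopodes"
}
```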
-Capitalize every word in sentence "hello there" -> "Hello There" - -``` -func (rs *Ruleset) Typeify(word string) string -``` -"something_like_this" -> "SomethingLikeThis" - -``` -func (rs *Ruleset) Uncountables() map[string]bool -``` - -``` -func (rs *Ruleset) Underscore(word string) string -``` - -lowercase underscore version "BigBen" -> "big_ben" - - diff --git a/vendor/github.com/markbates/inflect/go.mod b/vendor/github.com/markbates/inflect/go.mod deleted file mode 100644 index 3d7baa77f7..0000000000 --- a/vendor/github.com/markbates/inflect/go.mod +++ /dev/null @@ -1,6 +0,0 @@ -module github.com/markbates/inflect - -require ( - github.com/gobuffalo/envy v1.6.5 - github.com/stretchr/testify v1.2.2 -) diff --git a/vendor/github.com/markbates/inflect/go.sum b/vendor/github.com/markbates/inflect/go.sum deleted file mode 100644 index 6d42090d0a..0000000000 --- a/vendor/github.com/markbates/inflect/go.sum +++ /dev/null @@ -1,10 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gobuffalo/envy v1.6.5 h1:X3is06x7v0nW2xiy2yFbbIjwHz57CD6z6MkvqULTCm8= -github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/markbates/inflect/helpers.go b/vendor/github.com/markbates/inflect/helpers.go deleted file mode 100644 index 24050c70a0..0000000000 --- a/vendor/github.com/markbates/inflect/helpers.go +++ /dev/null @@ -1,19 +0,0 @@ -package inflect - -//Helpers is a map of the helper names with its corresponding inflect function -var Helpers = map[string]interface{}{ - "asciffy": Asciify, - "camelize": Camelize, - "camelize_down_first": CamelizeDownFirst, - "capitalize": Capitalize, - "dasherize": Dasherize, - "humanize": Humanize, - "ordinalize": Ordinalize, - "parameterize": Parameterize, - "pluralize": Pluralize, - "pluralize_with_size": PluralizeWithSize, - "singularize": Singularize, - "tableize": Tableize, - "typeify": Typeify, - "underscore": Underscore, -} diff --git a/vendor/github.com/markbates/inflect/inflect.go b/vendor/github.com/markbates/inflect/inflect.go deleted file mode 100644 index 9b6776c191..0000000000 --- a/vendor/github.com/markbates/inflect/inflect.go +++ /dev/null @@ -1,892 +0,0 @@ -package inflect - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// baseAcronyms comes from https://en.wikipedia.org/wiki/List_of_information_technology_acronymss -const baseAcronyms = 
`JSON,JWT,ID,UUID,SQL,ACK,ACL,ADSL,AES,ANSI,API,ARP,ATM,BGP,BSS,CAT,CCITT,CHAP,CIDR,CIR,CLI,CPE,CPU,CRC,CRT,CSMA,CMOS,DCE,DEC,DES,DHCP,DNS,DRAM,DSL,DSLAM,DTE,DMI,EHA,EIA,EIGRP,EOF,ESS,FCC,FCS,FDDI,FTP,GBIC,gbps,GEPOF,HDLC,HTTP,HTTPS,IANA,ICMP,IDF,IDS,IEEE,IETF,IMAP,IP,IPS,ISDN,ISP,kbps,LACP,LAN,LAPB,LAPF,LLC,MAC,MAN,Mbps,MC,MDF,MIB,MoCA,MPLS,MTU,NAC,NAT,NBMA,NIC,NRZ,NRZI,NVRAM,OSI,OSPF,OUI,PAP,PAT,PC,PIM,PIM,PCM,PDU,POP3,POP,POTS,PPP,PPTP,PTT,PVST,RADIUS,RAM,RARP,RFC,RIP,RLL,ROM,RSTP,RTP,RCP,SDLC,SFD,SFP,SLARP,SLIP,SMTP,SNA,SNAP,SNMP,SOF,SRAM,SSH,SSID,STP,SYN,TDM,TFTP,TIA,TOFU,UDP,URL,URI,USB,UTP,VC,VLAN,VLSM,VPN,W3C,WAN,WEP,WiFi,WPA,WWW` - -// Rule used by rulesets -type Rule struct { - suffix string - replacement string - exact bool -} - -// Ruleset a Ruleset is the config of pluralization rules -// you can extend the rules with the Add* methods -type Ruleset struct { - uncountables map[string]bool - plurals []*Rule - singulars []*Rule - humans []*Rule - acronyms []*Rule -} - -// NewRuleset creates a blank ruleset. Unless you are going to -// build your own rules from scratch you probably -// won't need this and can just use the defaultRuleset -// via the global inflect.* methods -func NewRuleset() *Ruleset { - rs := new(Ruleset) - rs.uncountables = make(map[string]bool) - rs.plurals = make([]*Rule, 0) - rs.singulars = make([]*Rule, 0) - rs.humans = make([]*Rule, 0) - rs.acronyms = make([]*Rule, 0) - return rs -} - -// NewDefaultRuleset creates a new ruleset and load it with the default -// set of common English pluralization rules -func NewDefaultRuleset() *Ruleset { - rs := NewRuleset() - rs.AddPlural("movie", "movies") - rs.AddPlural("s", "s") - rs.AddPlural("testis", "testes") - rs.AddPlural("axis", "axes") - rs.AddPlural("octopus", "octopi") - rs.AddPlural("virus", "viri") - rs.AddPlural("octopi", "octopi") - rs.AddPlural("viri", "viri") - rs.AddPlural("alias", "aliases") - rs.AddPlural("status", "statuses") - rs.AddPlural("Status", "Statuses") - rs.AddPlural("campus", "campuses") - rs.AddPlural("bus", "buses") - rs.AddPlural("buffalo", "buffaloes") - rs.AddPlural("tomato", "tomatoes") - rs.AddPlural("tum", "ta") - rs.AddPlural("ium", "ia") - rs.AddPlural("ta", "ta") - rs.AddPlural("ia", "ia") - rs.AddPlural("sis", "ses") - rs.AddPlural("lf", "lves") - rs.AddPlural("rf", "rves") - rs.AddPlural("afe", "aves") - rs.AddPlural("bfe", "bves") - rs.AddPlural("cfe", "cves") - rs.AddPlural("dfe", "dves") - rs.AddPlural("efe", "eves") - rs.AddPlural("gfe", "gves") - rs.AddPlural("hfe", "hves") - rs.AddPlural("ife", "ives") - rs.AddPlural("jfe", "jves") - rs.AddPlural("kfe", "kves") - rs.AddPlural("lfe", "lves") - rs.AddPlural("mfe", "mves") - rs.AddPlural("nfe", "nves") - rs.AddPlural("ofe", "oves") - rs.AddPlural("pfe", "pves") - rs.AddPlural("qfe", "qves") - rs.AddPlural("rfe", "rves") - rs.AddPlural("sfe", "sves") - rs.AddPlural("tfe", "tves") - rs.AddPlural("ufe", "uves") - rs.AddPlural("vfe", "vves") - rs.AddPlural("wfe", "wves") - rs.AddPlural("xfe", "xves") - rs.AddPlural("yfe", "yves") - rs.AddPlural("zfe", "zves") - rs.AddPlural("hive", "hives") - rs.AddPlural("quy", "quies") - rs.AddPlural("by", "bies") - rs.AddPlural("cy", "cies") - rs.AddPlural("dy", "dies") - rs.AddPlural("fy", "fies") - rs.AddPlural("gy", "gies") - rs.AddPlural("hy", "hies") - rs.AddPlural("jy", "jies") - rs.AddPlural("ky", "kies") - rs.AddPlural("ly", "lies") - rs.AddPlural("my", "mies") - rs.AddPlural("ny", "nies") - rs.AddPlural("py", "pies") - rs.AddPlural("qy", "qies") - rs.AddPlural("ry", "ries") - 
rs.AddPlural("sy", "sies") - rs.AddPlural("ty", "ties") - rs.AddPlural("vy", "vies") - rs.AddPlural("wy", "wies") - rs.AddPlural("xy", "xies") - rs.AddPlural("zy", "zies") - rs.AddPlural("x", "xes") - rs.AddPlural("ch", "ches") - rs.AddPlural("ss", "sses") - rs.AddPlural("sh", "shes") - rs.AddPlural("matrix", "matrices") - rs.AddPlural("vertix", "vertices") - rs.AddPlural("indix", "indices") - rs.AddPlural("matrex", "matrices") - rs.AddPlural("vertex", "vertices") - rs.AddPlural("index", "indices") - rs.AddPlural("mouse", "mice") - rs.AddPlural("louse", "lice") - rs.AddPlural("mice", "mice") - rs.AddPlural("lice", "lice") - rs.AddPlural("ress", "resses") - rs.AddPluralExact("ox", "oxen", true) - rs.AddPluralExact("oxen", "oxen", true) - rs.AddPluralExact("quiz", "quizzes", true) - rs.AddSingular("s", "") - rs.AddSingular("ss", "ss") - rs.AddSingular("news", "news") - rs.AddSingular("ta", "tum") - rs.AddSingular("ia", "ium") - rs.AddSingular("analyses", "analysis") - rs.AddSingular("bases", "basis") - rs.AddSingularExact("basis", "basis", true) - rs.AddSingular("diagnoses", "diagnosis") - rs.AddSingularExact("diagnosis", "diagnosis", true) - rs.AddSingular("parentheses", "parenthesis") - rs.AddSingular("prognoses", "prognosis") - rs.AddSingular("synopses", "synopsis") - rs.AddSingular("theses", "thesis") - rs.AddSingular("analyses", "analysis") - rs.AddSingularExact("analysis", "analysis", true) - rs.AddSingular("ovies", "ovie") - rs.AddSingular("aves", "afe") - rs.AddSingular("bves", "bfe") - rs.AddSingular("cves", "cfe") - rs.AddSingular("dves", "dfe") - rs.AddSingular("eves", "efe") - rs.AddSingular("gves", "gfe") - rs.AddSingular("hves", "hfe") - rs.AddSingular("ives", "ife") - rs.AddSingular("jves", "jfe") - rs.AddSingular("kves", "kfe") - rs.AddSingular("lves", "lfe") - rs.AddSingular("mves", "mfe") - rs.AddSingular("nves", "nfe") - rs.AddSingular("oves", "ofe") - rs.AddSingular("pves", "pfe") - rs.AddSingular("qves", "qfe") - rs.AddSingular("rves", "rfe") - rs.AddSingular("sves", "sfe") - rs.AddSingular("tves", "tfe") - rs.AddSingular("uves", "ufe") - rs.AddSingular("vves", "vfe") - rs.AddSingular("wves", "wfe") - rs.AddSingular("xves", "xfe") - rs.AddSingular("yves", "yfe") - rs.AddSingular("zves", "zfe") - rs.AddSingular("hives", "hive") - rs.AddSingular("tives", "tive") - rs.AddSingular("lves", "lf") - rs.AddSingular("rves", "rf") - rs.AddSingular("quies", "quy") - rs.AddSingular("bies", "by") - rs.AddSingular("cies", "cy") - rs.AddSingular("dies", "dy") - rs.AddSingular("fies", "fy") - rs.AddSingular("gies", "gy") - rs.AddSingular("hies", "hy") - rs.AddSingular("jies", "jy") - rs.AddSingular("kies", "ky") - rs.AddSingular("lies", "ly") - rs.AddSingular("mies", "my") - rs.AddSingular("nies", "ny") - rs.AddSingular("pies", "py") - rs.AddSingular("qies", "qy") - rs.AddSingular("ries", "ry") - rs.AddSingular("sies", "sy") - rs.AddSingular("ties", "ty") - // rs.AddSingular("vies", "vy") - rs.AddSingular("wies", "wy") - rs.AddSingular("xies", "xy") - rs.AddSingular("zies", "zy") - rs.AddSingular("series", "series") - rs.AddSingular("xes", "x") - rs.AddSingular("ches", "ch") - rs.AddSingular("sses", "ss") - rs.AddSingular("shes", "sh") - rs.AddSingular("mice", "mouse") - rs.AddSingular("lice", "louse") - rs.AddSingular("buses", "bus") - rs.AddSingularExact("bus", "bus", true) - rs.AddSingular("oes", "o") - rs.AddSingular("shoes", "shoe") - rs.AddSingular("crises", "crisis") - rs.AddSingularExact("crisis", "crisis", true) - rs.AddSingular("axes", "axis") - rs.AddSingularExact("axis", 
"axis", true) - rs.AddSingular("testes", "testis") - rs.AddSingularExact("testis", "testis", true) - rs.AddSingular("octopi", "octopus") - rs.AddSingularExact("octopus", "octopus", true) - rs.AddSingular("viri", "virus") - rs.AddSingularExact("virus", "virus", true) - rs.AddSingular("statuses", "status") - rs.AddSingular("Statuses", "Status") - rs.AddSingular("campuses", "campus") - rs.AddSingularExact("status", "status", true) - rs.AddSingularExact("Status", "Status", true) - rs.AddSingularExact("campus", "campus", true) - rs.AddSingular("aliases", "alias") - rs.AddSingularExact("alias", "alias", true) - rs.AddSingularExact("oxen", "ox", true) - rs.AddSingular("vertices", "vertex") - rs.AddSingular("indices", "index") - rs.AddSingular("matrices", "matrix") - rs.AddSingularExact("quizzes", "quiz", true) - rs.AddSingular("databases", "database") - rs.AddSingular("resses", "ress") - rs.AddSingular("ress", "ress") - rs.AddIrregular("person", "people") - rs.AddIrregular("man", "men") - rs.AddIrregular("child", "children") - rs.AddIrregular("sex", "sexes") - rs.AddIrregular("move", "moves") - rs.AddIrregular("zombie", "zombies") - rs.AddIrregular("Status", "Statuses") - rs.AddIrregular("status", "statuses") - rs.AddIrregular("campus", "campuses") - rs.AddIrregular("human", "humans") - rs.AddUncountable("equipment") - rs.AddUncountable("information") - rs.AddUncountable("rice") - rs.AddUncountable("money") - rs.AddUncountable("species") - rs.AddUncountable("series") - rs.AddUncountable("fish") - rs.AddUncountable("sheep") - rs.AddUncountable("jeans") - rs.AddUncountable("police") - - acronyms := strings.Split(baseAcronyms, ",") - for _, acr := range acronyms { - rs.AddAcronym(acr) - } - - return rs -} - -// Uncountables returns a map of uncountables in the ruleset -func (rs *Ruleset) Uncountables() map[string]bool { - return rs.uncountables -} - -// AddPlural add a pluralization rule -func (rs *Ruleset) AddPlural(suffix, replacement string) { - rs.AddPluralExact(suffix, replacement, false) -} - -// AddPluralExact add a pluralization rule with full string match -func (rs *Ruleset) AddPluralExact(suffix, replacement string, exact bool) { - // remove uncountable - delete(rs.uncountables, suffix) - // create rule - r := new(Rule) - r.suffix = suffix - r.replacement = replacement - r.exact = exact - // prepend - rs.plurals = append([]*Rule{r}, rs.plurals...) -} - -// AddSingular add a singular rule -func (rs *Ruleset) AddSingular(suffix, replacement string) { - rs.AddSingularExact(suffix, replacement, false) -} - -// AddSingularExact same as AddSingular but you can set `exact` to force -// a full string match -func (rs *Ruleset) AddSingularExact(suffix, replacement string, exact bool) { - // remove from uncountable - delete(rs.uncountables, suffix) - // create rule - r := new(Rule) - r.suffix = suffix - r.replacement = replacement - r.exact = exact - rs.singulars = append([]*Rule{r}, rs.singulars...) -} - -// AddHuman Human rules are applied by humanize to show more friendly -// versions of words -func (rs *Ruleset) AddHuman(suffix, replacement string) { - r := new(Rule) - r.suffix = suffix - r.replacement = replacement - rs.humans = append([]*Rule{r}, rs.humans...) -} - -// AddIrregular Add any inconsistent pluralizing/singularizing rules -// to the set here. 
-func (rs *Ruleset) AddIrregular(singular, plural string) { - delete(rs.uncountables, singular) - delete(rs.uncountables, plural) - rs.AddPlural(singular, plural) - rs.AddPlural(plural, plural) - rs.AddSingular(plural, singular) -} - -// AddAcronym if you use acronym you may need to add them to the ruleset -// to prevent Underscored words of things like "HTML" coming out -// as "h_t_m_l" -func (rs *Ruleset) AddAcronym(word string) { - r := new(Rule) - r.suffix = word - r.replacement = rs.Titleize(strings.ToLower(word)) - rs.acronyms = append(rs.acronyms, r) -} - -// AddUncountable add a word to this ruleset that has the same singular and plural form -// for example: "rice" -func (rs *Ruleset) AddUncountable(word string) { - rs.uncountables[strings.ToLower(word)] = true -} - -func (rs *Ruleset) isUncountable(word string) bool { - // handle multiple words by using the last one - words := strings.Split(word, " ") - if _, exists := rs.uncountables[strings.ToLower(words[len(words)-1])]; exists { - return true - } - return false -} - -//isAcronym returns if a word is acronym or not. -func (rs *Ruleset) isAcronym(word string) bool { - for _, rule := range rs.acronyms { - if strings.ToUpper(rule.suffix) == strings.ToUpper(word) { - return true - } - } - - return false -} - -//PluralizeWithSize pluralize with taking number into account -func (rs *Ruleset) PluralizeWithSize(word string, size int) string { - if size == 1 { - return rs.Singularize(word) - } - return rs.Pluralize(word) -} - -// Pluralize returns the plural form of a singular word -func (rs *Ruleset) Pluralize(word string) string { - if len(word) == 0 { - return word - } - lWord := strings.ToLower(word) - if rs.isUncountable(lWord) { - return word - } - - var candidate string - for _, rule := range rs.plurals { - if rule.exact { - if lWord == rule.suffix { - // Capitalized word - if lWord[0] != word[0] && lWord[1:] == word[1:] { - return rs.Capitalize(rule.replacement) - } - return rule.replacement - } - continue - } - - if strings.EqualFold(word, rule.suffix) { - candidate = rule.replacement - } - - if strings.HasSuffix(word, rule.suffix) { - return replaceLast(word, rule.suffix, rule.replacement) - } - } - - if candidate != "" { - return candidate - } - return word + "s" -} - -//Singularize returns the singular form of a plural word -func (rs *Ruleset) Singularize(word string) string { - if len(word) <= 1 { - return word - } - lWord := strings.ToLower(word) - if rs.isUncountable(lWord) { - return word - } - - var candidate string - - for _, rule := range rs.singulars { - if rule.exact { - if lWord == rule.suffix { - // Capitalized word - if lWord[0] != word[0] && lWord[1:] == word[1:] { - return rs.Capitalize(rule.replacement) - } - return rule.replacement - } - continue - } - - if strings.EqualFold(word, rule.suffix) { - candidate = rule.replacement - } - - if strings.HasSuffix(word, rule.suffix) { - return replaceLast(word, rule.suffix, rule.replacement) - } - } - - if candidate != "" { - return candidate - } - - return word -} - -//Capitalize uppercase first character -func (rs *Ruleset) Capitalize(word string) string { - if rs.isAcronym(word) { - return strings.ToUpper(word) - } - return strings.ToUpper(word[:1]) + word[1:] -} - -//Camelize "dino_party" -> "DinoParty" -func (rs *Ruleset) Camelize(word string) string { - if rs.isAcronym(word) { - return strings.ToUpper(word) - } - words := splitAtCaseChangeWithTitlecase(word) - return strings.Join(words, "") -} - -//CamelizeDownFirst same as Camelcase but with first letter 
downcased -func (rs *Ruleset) CamelizeDownFirst(word string) string { - word = Camelize(word) - return strings.ToLower(word[:1]) + word[1:] -} - -//Titleize Capitalize every word in sentence "hello there" -> "Hello There" -func (rs *Ruleset) Titleize(word string) string { - words := splitAtCaseChangeWithTitlecase(word) - result := strings.Join(words, " ") - - var acronymWords []string - for index, word := range words { - if len(word) == 1 { - acronymWords = append(acronymWords, word) - } - - if len(word) > 1 || index == len(words)-1 || len(acronymWords) > 1 { - acronym := strings.Join(acronymWords, "") - if !rs.isAcronym(acronym) { - acronymWords = acronymWords[:len(acronymWords)] - continue - } - - result = strings.Replace(result, strings.Join(acronymWords, " "), acronym, 1) - acronymWords = []string{} - } - } - - return result -} - -func (rs *Ruleset) safeCaseAcronyms(word string) string { - // convert an acronym like HTML into Html - for _, rule := range rs.acronyms { - word = strings.Replace(word, rule.suffix, rule.replacement, -1) - } - return word -} - -func (rs *Ruleset) separatedWords(word, sep string) string { - word = rs.safeCaseAcronyms(word) - words := splitAtCaseChange(word) - return strings.Join(words, sep) -} - -//Underscore lowercase underscore version "BigBen" -> "big_ben" -func (rs *Ruleset) Underscore(word string) string { - return rs.separatedWords(word, "_") -} - -//Humanize First letter of sentence capitalized -// Uses custom friendly replacements via AddHuman() -func (rs *Ruleset) Humanize(word string) string { - word = replaceLast(word, "_id", "") // strip foreign key kinds - // replace and strings in humans list - for _, rule := range rs.humans { - word = strings.Replace(word, rule.suffix, rule.replacement, -1) - } - sentence := rs.separatedWords(word, " ") - - r, n := utf8.DecodeRuneInString(sentence) - return string(unicode.ToUpper(r)) + sentence[n:] -} - -//ForeignKey an underscored foreign key name "Person" -> "person_id" -func (rs *Ruleset) ForeignKey(word string) string { - return rs.Underscore(rs.Singularize(word)) + "_id" -} - -//ForeignKeyCondensed a foreign key (with an underscore) "Person" -> "personid" -func (rs *Ruleset) ForeignKeyCondensed(word string) string { - return rs.Underscore(word) + "id" -} - -//Tableize Rails style pluralized table names: "SuperPerson" -> "super_people" -func (rs *Ruleset) Tableize(word string) string { - return rs.Pluralize(rs.Underscore(rs.Typeify(word))) -} - -var notUrlSafe *regexp.Regexp = regexp.MustCompile(`[^\w\d\-_ ]`) - -//Parameterize param safe dasherized names like "my-param" -func (rs *Ruleset) Parameterize(word string) string { - return ParameterizeJoin(word, "-") -} - -//ParameterizeJoin param safe dasherized names with custom separator -func (rs *Ruleset) ParameterizeJoin(word, sep string) string { - word = strings.ToLower(word) - word = rs.Asciify(word) - word = notUrlSafe.ReplaceAllString(word, "") - word = strings.Replace(word, " ", sep, -1) - if len(sep) > 0 { - squash, err := regexp.Compile(sep + "+") - if err == nil { - word = squash.ReplaceAllString(word, sep) - } - } - word = strings.Trim(word, sep+" ") - return word -} - -var lookalikes = map[string]*regexp.Regexp{ - "A": regexp.MustCompile(`À|Á|Â|Ã|Ä|Å`), - "AE": regexp.MustCompile(`Æ`), - "C": regexp.MustCompile(`Ç`), - "E": regexp.MustCompile(`È|É|Ê|Ë`), - "G": regexp.MustCompile(`Ğ`), - "I": regexp.MustCompile(`Ì|Í|Î|Ï|İ`), - "N": regexp.MustCompile(`Ñ`), - "O": regexp.MustCompile(`Ò|Ó|Ô|Õ|Ö|Ø`), - "S": regexp.MustCompile(`Ş`), - "U": 
regexp.MustCompile(`Ù|Ú|Û|Ü`), - "Y": regexp.MustCompile(`Ý`), - "ss": regexp.MustCompile(`ß`), - "a": regexp.MustCompile(`à|á|â|ã|ä|å`), - "ae": regexp.MustCompile(`æ`), - "c": regexp.MustCompile(`ç`), - "e": regexp.MustCompile(`è|é|ê|ë`), - "g": regexp.MustCompile(`ğ`), - "i": regexp.MustCompile(`ì|í|î|ï|ı`), - "n": regexp.MustCompile(`ñ`), - "o": regexp.MustCompile(`ò|ó|ô|õ|ö|ø`), - "s": regexp.MustCompile(`ş`), - "u": regexp.MustCompile(`ù|ú|û|ü|ũ|ū|ŭ|ů|ű|ų`), - "y": regexp.MustCompile(`ý|ÿ`), -} - -//Asciify transforms Latin characters like é -> e -func (rs *Ruleset) Asciify(word string) string { - for repl, regex := range lookalikes { - word = regex.ReplaceAllString(word, repl) - } - return word -} - -var tablePrefix = regexp.MustCompile(`^[^.]*\.`) - -//Typeify "something_like_this" -> "SomethingLikeThis" -func (rs *Ruleset) Typeify(word string) string { - word = tablePrefix.ReplaceAllString(word, "") - return rs.Camelize(rs.Singularize(word)) -} - -//Dasherize "SomeText" -> "some-text" -func (rs *Ruleset) Dasherize(word string) string { - return rs.separatedWords(word, "-") -} - -//Ordinalize "1031" -> "1031st" -func (rs *Ruleset) Ordinalize(str string) string { - number, err := strconv.Atoi(str) - if err != nil { - return str - } - switch abs(number) % 100 { - case 11, 12, 13: - return fmt.Sprintf("%dth", number) - default: - switch abs(number) % 10 { - case 1: - return fmt.Sprintf("%dst", number) - case 2: - return fmt.Sprintf("%dnd", number) - case 3: - return fmt.Sprintf("%drd", number) - } - } - return fmt.Sprintf("%dth", number) -} - -//ForeignKeyToAttribute returns the attribute name from the foreign key -func (rs *Ruleset) ForeignKeyToAttribute(str string) string { - w := rs.Camelize(str) - if strings.HasSuffix(w, "Id") { - return strings.TrimSuffix(w, "Id") + "ID" - } - return w -} - -//LoadReader loads rules from io.Reader param -func (rs *Ruleset) LoadReader(r io.Reader) error { - m := map[string]string{} - err := json.NewDecoder(r).Decode(&m) - if err != nil { - return fmt.Errorf("could not decode inflection JSON from reader: %s", err) - } - for s, p := range m { - defaultRuleset.AddIrregular(s, p) - } - return nil -} - -///////////////////////////////////////// -// the default global ruleset -////////////////////////////////////////// - -var defaultRuleset *Ruleset - -//LoadReader loads rules from io.Reader param -func LoadReader(r io.Reader) error { - return defaultRuleset.LoadReader(r) -} - -func init() { - defaultRuleset = NewDefaultRuleset() - - pwd, _ := os.Getwd() - cfg := filepath.Join(pwd, "inflections.json") - if p := os.Getenv("INFLECT_PATH"); p != "" { - cfg = p - } - if _, err := os.Stat(cfg); err == nil { - b, err := ioutil.ReadFile(cfg) - if err != nil { - fmt.Printf("could not read inflection file %s (%s)\n", cfg, err) - return - } - if err = defaultRuleset.LoadReader(bytes.NewReader(b)); err != nil { - fmt.Println(err) - } - } -} - -//Uncountables returns a list of uncountables rules -func Uncountables() map[string]bool { - return defaultRuleset.Uncountables() -} - -//AddPlural adds plural to the ruleset -func AddPlural(suffix, replacement string) { - defaultRuleset.AddPlural(suffix, replacement) -} - -//AddSingular adds singular to the ruleset -func AddSingular(suffix, replacement string) { - defaultRuleset.AddSingular(suffix, replacement) -} - -//AddHuman adds human -func AddHuman(suffix, replacement string) { - defaultRuleset.AddHuman(suffix, replacement) -} - -func AddIrregular(singular, plural string) { - defaultRuleset.AddIrregular(singular, 
plural) -} - -func AddAcronym(word string) { - defaultRuleset.AddAcronym(word) -} - -func AddUncountable(word string) { - defaultRuleset.AddUncountable(word) -} - -func Pluralize(word string) string { - return defaultRuleset.Pluralize(word) -} - -func PluralizeWithSize(word string, size int) string { - return defaultRuleset.PluralizeWithSize(word, size) -} - -func Singularize(word string) string { - return defaultRuleset.Singularize(word) -} - -func Capitalize(word string) string { - return defaultRuleset.Capitalize(word) -} - -func Camelize(word string) string { - return defaultRuleset.Camelize(word) -} - -func CamelizeDownFirst(word string) string { - return defaultRuleset.CamelizeDownFirst(word) -} - -func Titleize(word string) string { - return defaultRuleset.Titleize(word) -} - -func Underscore(word string) string { - return defaultRuleset.Underscore(word) -} - -func Humanize(word string) string { - return defaultRuleset.Humanize(word) -} - -func ForeignKey(word string) string { - return defaultRuleset.ForeignKey(word) -} - -func ForeignKeyCondensed(word string) string { - return defaultRuleset.ForeignKeyCondensed(word) -} - -func Tableize(word string) string { - return defaultRuleset.Tableize(word) -} - -func Parameterize(word string) string { - return defaultRuleset.Parameterize(word) -} - -func ParameterizeJoin(word, sep string) string { - return defaultRuleset.ParameterizeJoin(word, sep) -} - -func Typeify(word string) string { - return defaultRuleset.Typeify(word) -} - -func Dasherize(word string) string { - return defaultRuleset.Dasherize(word) -} - -func Ordinalize(word string) string { - return defaultRuleset.Ordinalize(word) -} - -func Asciify(word string) string { - return defaultRuleset.Asciify(word) -} - -func ForeignKeyToAttribute(word string) string { - return defaultRuleset.ForeignKeyToAttribute(word) -} - -// helper funcs - -func reverse(s string) string { - o := make([]rune, utf8.RuneCountInString(s)) - i := len(o) - for _, c := range s { - i-- - o[i] = c - } - return string(o) -} - -func isSpacerChar(c rune) bool { - switch { - case c == rune("_"[0]): - return true - case c == rune(" "[0]): - return true - case c == rune(":"[0]): - return true - case c == rune("-"[0]): - return true - } - return false -} - -func splitAtCaseChange(s string) []string { - words := make([]string, 0) - word := make([]rune, 0) - for _, c := range s { - spacer := isSpacerChar(c) - if len(word) > 0 { - if unicode.IsUpper(c) || spacer { - words = append(words, string(word)) - word = make([]rune, 0) - } - } - if !spacer { - word = append(word, unicode.ToLower(c)) - } - } - words = append(words, string(word)) - return words -} - -func splitAtCaseChangeWithTitlecase(s string) []string { - words := make([]string, 0) - word := make([]rune, 0) - - for _, c := range s { - spacer := isSpacerChar(c) - if len(word) > 0 { - if unicode.IsUpper(c) || spacer { - words = append(words, string(word)) - word = make([]rune, 0) - } - } - if !spacer { - if len(word) > 0 { - word = append(word, unicode.ToLower(c)) - } else { - word = append(word, unicode.ToUpper(c)) - } - } - } - - words = append(words, string(word)) - return words -} - -func replaceLast(s, match, repl string) string { - // reverse strings - srev := reverse(s) - mrev := reverse(match) - rrev := reverse(repl) - // match first and reverse back - return reverse(strings.Replace(srev, mrev, rrev, 1)) -} - -func abs(x int) int { - if x < 0 { - return -x - } - return x -} diff --git a/vendor/github.com/markbates/inflect/name.go 
b/vendor/github.com/markbates/inflect/name.go deleted file mode 100644 index e6863e28a6..0000000000 --- a/vendor/github.com/markbates/inflect/name.go +++ /dev/null @@ -1,163 +0,0 @@ -package inflect - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/gobuffalo/envy" -) - -// Name is a string that represents the "name" of a thing, like an app, model, etc... -type Name string - -// Title version of a name. ie. "foo_bar" => "Foo Bar" -func (n Name) Title() string { - x := strings.Split(string(n), "/") - for i, s := range x { - x[i] = Titleize(s) - } - - return strings.Join(x, " ") -} - -// Underscore version of a name. ie. "FooBar" => "foo_bar" -func (n Name) Underscore() string { - w := string(n) - if strings.ToUpper(w) == w { - return strings.ToLower(w) - } - return Underscore(w) -} - -// Plural version of a name -func (n Name) Plural() string { - return Pluralize(string(n)) -} - -// Singular version of a name -func (n Name) Singular() string { - return Singularize(string(n)) -} - -// Camel version of a name -func (n Name) Camel() string { - c := Camelize(string(n)) - if strings.HasSuffix(c, "Id") { - c = strings.TrimSuffix(c, "Id") - c += "ID" - } - return c -} - -// Model version of a name. ie. "user" => "User" -func (n Name) Model() string { - x := strings.Split(string(n), "/") - for i, s := range x { - x[i] = Camelize(Singularize(s)) - } - - return strings.Join(x, "") -} - -// Resource version of a name -func (n Name) Resource() string { - name := n.Underscore() - x := strings.FieldsFunc(name, func(r rune) bool { - return r == '_' || r == '/' - }) - - for i, w := range x { - if i == len(x)-1 { - x[i] = Camelize(Pluralize(strings.ToLower(w))) - continue - } - - x[i] = Camelize(w) - } - - return strings.Join(x, "") -} - -// ModelPlural version of a name. ie. "user" => "Users" -func (n Name) ModelPlural() string { - return Camelize(Pluralize(n.Model())) -} - -// File version of a name -func (n Name) File() string { - return Underscore(Camelize(string(n))) -} - -// Table version of a name -func (n Name) Table() string { - return Underscore(Pluralize(string(n))) -} - -// UnderSingular version of a name -func (n Name) UnderSingular() string { - return Underscore(Singularize(string(n))) -} - -// PluralCamel version of a name -func (n Name) PluralCamel() string { - return Pluralize(Camelize(string(n))) -} - -// PluralUnder version of a name -func (n Name) PluralUnder() string { - return Pluralize(Underscore(string(n))) -} - -// URL version of a name -func (n Name) URL() string { - return n.PluralUnder() -} - -// CamelSingular version of a name -func (n Name) CamelSingular() string { - return Camelize(Singularize(string(n))) -} - -// VarCaseSingular version of a name. ie. "FooBar" => "fooBar" -func (n Name) VarCaseSingular() string { - return CamelizeDownFirst(Singularize(Underscore(n.Resource()))) -} - -// VarCasePlural version of a name. ie. 
"FooBar" => "fooBar" -func (n Name) VarCasePlural() string { - return CamelizeDownFirst(n.Resource()) -} - -// Lower case version of a string -func (n Name) Lower() string { - return strings.ToLower(string(n)) -} - -// ParamID returns foo_bar_id -func (n Name) ParamID() string { - return fmt.Sprintf("%s_id", strings.Replace(n.UnderSingular(), "/", "_", -1)) -} - -// Package returns go package -func (n Name) Package() string { - key := string(n) - - for _, gp := range envy.GoPaths() { - key = strings.TrimPrefix(key, filepath.Join(gp, "src")) - key = strings.TrimPrefix(key, gp) - } - key = strings.TrimPrefix(key, string(filepath.Separator)) - - key = strings.Replace(key, "\\", "/", -1) - return key -} - -// Char returns first character in lower case, this is useful for methods inside a struct. -func (n Name) Char() string { - return strings.ToLower(string(n[0])) -} - -func (n Name) String() string { - return string(n) -} diff --git a/vendor/github.com/markbates/inflect/shoulders.md b/vendor/github.com/markbates/inflect/shoulders.md deleted file mode 100644 index 619e7892f7..0000000000 --- a/vendor/github.com/markbates/inflect/shoulders.md +++ /dev/null @@ -1,12 +0,0 @@ -# github.com/markbates/inflect Stands on the Shoulders of Giants - -github.com/markbates/inflect does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants this project would not be possible. Please make sure to check them out and thank them for all of their hard work. - -Thank you to the following **GIANTS**: - - -* [github.com/gobuffalo/envy](https://godoc.org/github.com/gobuffalo/envy) - -* [github.com/joho/godotenv](https://godoc.org/github.com/joho/godotenv) - -* [github.com/markbates/inflect](https://godoc.org/github.com/markbates/inflect) diff --git a/vendor/github.com/markbates/inflect/version.go b/vendor/github.com/markbates/inflect/version.go deleted file mode 100644 index a167449841..0000000000 --- a/vendor/github.com/markbates/inflect/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package inflect - -const Version = "v1.0.4" diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml index 4d71367f6f..2420a5d070 100644 --- a/vendor/github.com/onsi/gomega/.travis.yml +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -1,12 +1,9 @@ language: go go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - 1.10.x - 1.11.x + - 1.12.x env: - GO111MODULE=on @@ -17,7 +14,4 @@ install: - go get github.com/onsi/ginkgo - go install github.com/onsi/ginkgo/ginkgo -script: | - $HOME/gopath/bin/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race && - go vet && - [ -z "`gofmt -l -e -s -w .`" ] +script: make test diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 9153294f75..5d1eda8374 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,14 @@ +## 1.5.0 + +### Features + +- Added MatchKeys matchers [8b909fc] + +### Fixes and Minor Improvements + +- Add type aliases to remove stuttering [03b0461] +- Don't run session_test.go on windows (#324) [5533ce8] + ## 1.4.3 ### Fixes: diff --git a/vendor/github.com/onsi/gomega/Makefile b/vendor/github.com/onsi/gomega/Makefile new file mode 100644 index 0000000000..c92cd56e3a --- /dev/null 
+++ b/vendor/github.com/onsi/gomega/Makefile @@ -0,0 +1,6 @@ +test: + [ -z "`gofmt -s -w -l -e .`" ] + go vet + ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race + +.PHONY: test diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 471f691a62..448d595da6 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.4.3" +const GOMEGA_VERSION = "1.5.0" const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -39,20 +39,14 @@ var defaultEventuallyPollingInterval = 10 * time.Millisecond var defaultConsistentlyDuration = 100 * time.Millisecond var defaultConsistentlyPollingInterval = 10 * time.Millisecond -//RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails -//the fail handler passed into RegisterFailHandler is called. +// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails +// the fail handler passed into RegisterFailHandler is called. func RegisterFailHandler(handler types.GomegaFailHandler) { - if handler == nil { - globalFailWrapper = nil - return - } - - globalFailWrapper = &types.GomegaFailWrapper{ - Fail: handler, - TWithHelper: testingtsupport.EmptyTWithHelper{}, - } + RegisterFailHandlerWithT(testingtsupport.EmptyTWithHelper{}, handler) } +// RegisterFailHandlerWithT ensures that the given types.TWithHelper and fail handler +// are used globally. func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandler) { if handler == nil { globalFailWrapper = nil @@ -65,12 +59,12 @@ func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandl } } -//RegisterTestingT connects Gomega to Golang's XUnit style -//Testing.T tests. It is now deprecated and you should use NewGomegaWithT() instead. +// RegisterTestingT connects Gomega to Golang's XUnit style +// Testing.T tests. It is now deprecated and you should use NewWithT() instead. // -//Legacy Documentation: +// Legacy Documentation: // -//You'll need to call this at the top of each XUnit style test: +// You'll need to call this at the top of each XUnit style test: // // func TestFarmHasCow(t *testing.T) { // RegisterTestingT(t) @@ -83,7 +77,7 @@ func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandl // pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests // in parallel as the global fail handler cannot point to more than one testing.T at a time. // -// NewGomegaWithT() does not have this limitation +// NewWithT() does not have this limitation // // (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*). func RegisterTestingT(t types.GomegaTestingT) { @@ -95,15 +89,15 @@ func RegisterTestingT(t types.GomegaTestingT) { RegisterFailHandlerWithT(tWithHelper, testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail) } -//InterceptGomegaHandlers runs a given callback and returns an array of -//failure messages generated by any Gomega assertions within the callback. +// InterceptGomegaFailures runs a given callback and returns an array of +// failure messages generated by any Gomega assertions within the callback. 
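The comment being corrected here (InterceptGomegaHandlers renamed to InterceptGomegaFailures) documents the failure-interception helper; a brief, illustrative use when testing a custom matcher:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestCapturesFailures(t *testing.T) {
	RegisterTestingT(t)

	// Assertions made inside the callback are captured instead of failing
	// the surrounding test, which is handy when testing custom matchers.
	failures := InterceptGomegaFailures(func() {
		Expect(2 + 2).To(Equal(5))
	})
	Expect(failures).To(HaveLen(1))
}
```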
// -//This is accomplished by temporarily replacing the *global* fail handler -//with a fail handler that simply annotates failures. The original fail handler -//is reset when InterceptGomegaFailures returns. +// This is accomplished by temporarily replacing the *global* fail handler +// with a fail handler that simply annotates failures. The original fail handler +// is reset when InterceptGomegaFailures returns. // -//This is most useful when testing custom matchers, but can also be used to check -//on a value using a Gomega assertion without causing a test failure. +// This is most useful when testing custom matchers, but can also be used to check +// on a value using a Gomega assertion without causing a test failure. func InterceptGomegaFailures(f func()) []string { originalHandler := globalFailWrapper.Fail failures := []string{} @@ -115,108 +109,108 @@ func InterceptGomegaFailures(f func()) []string { return failures } -//Ω wraps an actual value allowing assertions to be made on it: +// Ω wraps an actual value allowing assertions to be made on it: // Ω("foo").Should(Equal("foo")) // -//If Ω is passed more than one argument it will pass the *first* argument to the matcher. -//All subsequent arguments will be required to be nil/zero. +// If Ω is passed more than one argument it will pass the *first* argument to the matcher. +// All subsequent arguments will be required to be nil/zero. // -//This is convenient if you want to make an assertion on a method/function that returns -//a value and an error - a common patter in Go. +// This is convenient if you want to make an assertion on a method/function that returns +// a value and an error - a common patter in Go. // -//For example, given a function with signature: -// func MyAmazingThing() (int, error) +// For example, given a function with signature: +// func MyAmazingThing() (int, error) // -//Then: +// Then: // Ω(MyAmazingThing()).Should(Equal(3)) -//Will succeed only if `MyAmazingThing()` returns `(3, nil)` +// Will succeed only if `MyAmazingThing()` returns `(3, nil)` // -//Ω and Expect are identical -func Ω(actual interface{}, extra ...interface{}) GomegaAssertion { +// Ω and Expect are identical +func Ω(actual interface{}, extra ...interface{}) Assertion { return ExpectWithOffset(0, actual, extra...) } -//Expect wraps an actual value allowing assertions to be made on it: +// Expect wraps an actual value allowing assertions to be made on it: // Expect("foo").To(Equal("foo")) // -//If Expect is passed more than one argument it will pass the *first* argument to the matcher. -//All subsequent arguments will be required to be nil/zero. +// If Expect is passed more than one argument it will pass the *first* argument to the matcher. +// All subsequent arguments will be required to be nil/zero. // -//This is convenient if you want to make an assertion on a method/function that returns -//a value and an error - a common patter in Go. +// This is convenient if you want to make an assertion on a method/function that returns +// a value and an error - a common patter in Go. 
// -//For example, given a function with signature: -// func MyAmazingThing() (int, error) +// For example, given a function with signature: +// func MyAmazingThing() (int, error) // -//Then: +// Then: // Expect(MyAmazingThing()).Should(Equal(3)) -//Will succeed only if `MyAmazingThing()` returns `(3, nil)` +// Will succeed only if `MyAmazingThing()` returns `(3, nil)` // -//Expect and Ω are identical -func Expect(actual interface{}, extra ...interface{}) GomegaAssertion { +// Expect and Ω are identical +func Expect(actual interface{}, extra ...interface{}) Assertion { return ExpectWithOffset(0, actual, extra...) } -//ExpectWithOffset wraps an actual value allowing assertions to be made on it: +// ExpectWithOffset wraps an actual value allowing assertions to be made on it: // ExpectWithOffset(1, "foo").To(Equal("foo")) // -//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument -//this is used to modify the call-stack offset when computing line numbers. +// Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument +// this is used to modify the call-stack offset when computing line numbers. // -//This is most useful in helper functions that make assertions. If you want Gomega's -//error message to refer to the calling line in the test (as opposed to the line in the helper function) -//set the first argument of `ExpectWithOffset` appropriately. -func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion { +// This is most useful in helper functions that make assertions. If you want Gomega's +// error message to refer to the calling line in the test (as opposed to the line in the helper function) +// set the first argument of `ExpectWithOffset` appropriately. +func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion { if globalFailWrapper == nil { panic(nilFailHandlerPanic) } return assertion.New(actual, globalFailWrapper, offset, extra...) } -//Eventually wraps an actual value allowing assertions to be made on it. -//The assertion is tried periodically until it passes or a timeout occurs. +// Eventually wraps an actual value allowing assertions to be made on it. +// The assertion is tried periodically until it passes or a timeout occurs. // -//Both the timeout and polling interval are configurable as optional arguments: -//The first optional argument is the timeout -//The second optional argument is the polling interval +// Both the timeout and polling interval are configurable as optional arguments: +// The first optional argument is the timeout +// The second optional argument is the polling interval // -//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the -//last case they are interpreted as seconds. +// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +// last case they are interpreted as seconds. // -//If Eventually is passed an actual that is a function taking no arguments and returning at least one value, -//then Eventually will call the function periodically and try the matcher against the function's first return value. +// If Eventually is passed an actual that is a function taking no arguments and returning at least one value, +// then Eventually will call the function periodically and try the matcher against the function's first return value. 
// -//Example: +// Example: // // Eventually(func() int { // return thingImPolling.Count() // }).Should(BeNumerically(">=", 17)) // -//Note that this example could be rewritten: +// Note that this example could be rewritten: // // Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17)) // -//If the function returns more than one value, then Eventually will pass the first value to the matcher and -//assert that all other values are nil/zero. -//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. +// If the function returns more than one value, then Eventually will pass the first value to the matcher and +// assert that all other values are nil/zero. +// This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. // -//For example, consider a method that returns a value and an error: +// For example, consider a method that returns a value and an error: // func FetchFromDB() (string, error) // -//Then +// Then // Eventually(FetchFromDB).Should(Equal("hasselhoff")) // -//Will pass only if the the returned error is nil and the returned string passes the matcher. +// Will pass only if the the returned error is nil and the returned string passes the matcher. // -//Eventually's default timeout is 1 second, and its default polling interval is 10ms -func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { +// Eventually's default timeout is 1 second, and its default polling interval is 10ms +func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { return EventuallyWithOffset(0, actual, intervals...) } -//EventuallyWithOffset operates like Eventually but takes an additional -//initial argument to indicate an offset in the call stack. This is useful when building helper -//functions that contain matchers. To learn more, read about `ExpectWithOffset`. -func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { +// EventuallyWithOffset operates like Eventually but takes an additional +// initial argument to indicate an offset in the call stack. This is useful when building helper +// functions that contain matchers. To learn more, read about `ExpectWithOffset`. +func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { if globalFailWrapper == nil { panic(nilFailHandlerPanic) } @@ -231,37 +225,37 @@ func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset) } -//Consistently wraps an actual value allowing assertions to be made on it. -//The assertion is tried periodically and is required to pass for a period of time. +// Consistently wraps an actual value allowing assertions to be made on it. +// The assertion is tried periodically and is required to pass for a period of time. 
// -//Both the total time and polling interval are configurable as optional arguments: -//The first optional argument is the duration that Consistently will run for -//The second optional argument is the polling interval +// Both the total time and polling interval are configurable as optional arguments: +// The first optional argument is the duration that Consistently will run for +// The second optional argument is the polling interval // -//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the -//last case they are interpreted as seconds. +// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +// last case they are interpreted as seconds. // -//If Consistently is passed an actual that is a function taking no arguments and returning at least one value, -//then Consistently will call the function periodically and try the matcher against the function's first return value. +// If Consistently is passed an actual that is a function taking no arguments and returning at least one value, +// then Consistently will call the function periodically and try the matcher against the function's first return value. // -//If the function returns more than one value, then Consistently will pass the first value to the matcher and -//assert that all other values are nil/zero. -//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go. +// If the function returns more than one value, then Consistently will pass the first value to the matcher and +// assert that all other values are nil/zero. +// This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go. // -//Consistently is useful in cases where you want to assert that something *does not happen* over a period of tiem. -//For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could: +// Consistently is useful in cases where you want to assert that something *does not happen* over a period of tiem. +// For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could: // -// Consistently(channel).ShouldNot(Receive()) +// Consistently(channel).ShouldNot(Receive()) // -//Consistently's default duration is 100ms, and its default polling interval is 10ms -func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { +// Consistently's default duration is 100ms, and its default polling interval is 10ms +func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { return ConsistentlyWithOffset(0, actual, intervals...) } -//ConsistentlyWithOffset operates like Consistnetly but takes an additional -//initial argument to indicate an offset in the call stack. This is useful when building helper -//functions that contain matchers. To learn more, read about `ExpectWithOffset`. -func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { +// ConsistentlyWithOffset operates like Consistnetly but takes an additional +// initial argument to indicate an offset in the call stack. This is useful when building helper +// functions that contain matchers. To learn more, read about `ExpectWithOffset`. 
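As an aside on the ...WithOffset variants documented above: they exist so that assertion helpers can report the caller's line rather than their own. A minimal, hedged sketch follows; the expectHealthy helper and its argument are invented for illustration and are not part of this diff or of Gomega itself.

package example_test

import (
    "testing"

    . "github.com/onsi/gomega"
)

// expectHealthy is a hypothetical assertion helper. Offset 1 makes Gomega
// attribute a failure to the caller's line, not to the line inside the helper.
func expectHealthy(status string) {
    ExpectWithOffset(1, status).To(Equal("healthy"))
}

func TestServiceIsHealthy(t *testing.T) {
    RegisterTestingT(t) // package-level assertions need a registered fail handler
    expectHealthy("healthy")
}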
+func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { if globalFailWrapper == nil { panic(nilFailHandlerPanic) } @@ -276,59 +270,62 @@ func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interfa return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset) } -//Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. +// SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. func SetDefaultEventuallyTimeout(t time.Duration) { defaultEventuallyTimeout = t } -//Set the default polling interval for Eventually. +// SetDefaultEventuallyPollingInterval sets the default polling interval for Eventually. func SetDefaultEventuallyPollingInterval(t time.Duration) { defaultEventuallyPollingInterval = t } -//Set the default duration for Consistently. Consistently will verify that your condition is satsified for this long. +// SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satsified for this long. func SetDefaultConsistentlyDuration(t time.Duration) { defaultConsistentlyDuration = t } -//Set the default polling interval for Consistently. +// SetDefaultConsistentlyPollingInterval sets the default polling interval for Consistently. func SetDefaultConsistentlyPollingInterval(t time.Duration) { defaultConsistentlyPollingInterval = t } -//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against -//the matcher passed to the Should and ShouldNot methods. +// AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against +// the matcher passed to the Should and ShouldNot methods. // -//Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to -//fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more -//descriptive +// Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to +// fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more +// descriptive. // -//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed. +// Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed. // -//Example: +// Example: // -// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") -// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.") -type GomegaAsyncAssertion interface { +// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") +// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.") +type AsyncAssertion interface { Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool } -//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher -//passed to the Should/ShouldNot and To/ToNot/NotTo methods. 
+// GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter. +type GomegaAsyncAssertion = AsyncAssertion + +// Assertion is returned by Ω and Expect and compares the actual value to the matcher +// passed to the Should/ShouldNot and To/ToNot/NotTo methods. // -//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect -//though this is not enforced. +// Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect +// though this is not enforced. // -//All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf() -//and is used to annotate failure messages. +// All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf() +// and is used to annotate failure messages. // -//All methods return a bool that is true if hte assertion passed and false if it failed. +// All methods return a bool that is true if hte assertion passed and false if it failed. // -//Example: +// Example: // -// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) -type GomegaAssertion interface { +// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) +type Assertion interface { Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool @@ -337,39 +334,50 @@ type GomegaAssertion interface { NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool } -//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it +// GomegaAssertion is deprecated in favor of Assertion, which does not stutter. +type GomegaAssertion = Assertion + +// OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it type OmegaMatcher types.GomegaMatcher -//GomegaWithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage -//Gomega's rich ecosystem of matchers in standard `testing` test suites. +// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage +// Gomega's rich ecosystem of matchers in standard `testing` test suites. // -//Use `NewGomegaWithT` to instantiate a `GomegaWithT` -type GomegaWithT struct { +// Use `NewWithT` to instantiate a `WithT` +type WithT struct { t types.GomegaTestingT } -//NewGomegaWithT takes a *testing.T and returngs a `GomegaWithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with -//Gomega's rich ecosystem of matchers in standard `testing` test suits. +// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter. +type GomegaWithT = WithT + +// NewWithT takes a *testing.T and returngs a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with +// Gomega's rich ecosystem of matchers in standard `testing` test suits. 
// // func TestFarmHasCow(t *testing.T) { -// g := GomegaWithT(t) +// g := gomega.NewWithT(t) // // f := farm.New([]string{"Cow", "Horse"}) // g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") // } -func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT { - return &GomegaWithT{ +func NewWithT(t types.GomegaTestingT) *WithT { + return &WithT{ t: t, } } -//See documentation for Expect -func (g *GomegaWithT) Expect(actual interface{}, extra ...interface{}) GomegaAssertion { +// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter. +func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT { + return NewWithT(t) +} + +// Expect is used to make assertions. See documentation for Expect. +func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion { return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), 0, extra...) } -//See documentation for Eventually -func (g *GomegaWithT) Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { +// Eventually is used to make asynchronous assertions. See documentation for Eventually. +func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { timeoutInterval := defaultEventuallyTimeout pollingInterval := defaultEventuallyPollingInterval if len(intervals) > 0 { @@ -381,8 +389,8 @@ func (g *GomegaWithT) Eventually(actual interface{}, intervals ...interface{}) G return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0) } -//See documentation for Consistently -func (g *GomegaWithT) Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { +// Consistently is used to make asynchronous assertions. See documentation for Consistently. 
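For context on the GomegaWithT to WithT rename above, here is a short hedged sketch of how the new NewWithT entry point is used from a plain `testing` test. The channel example is invented; only NewWithT, Expect, Eventually, Consistently, Receive, and Equal are Gomega API.

package example_test

import (
    "testing"
    "time"

    "github.com/onsi/gomega"
)

func TestEventuallyReceives(t *testing.T) {
    g := gomega.NewWithT(t) // replaces the deprecated NewGomegaWithT

    ch := make(chan string, 1)
    go func() {
        time.Sleep(20 * time.Millisecond)
        ch <- "done"
    }()

    g.Expect(cap(ch)).To(gomega.Equal(1))
    g.Eventually(ch).Should(gomega.Receive(gomega.Equal("done"))) // polls until the send lands
    g.Consistently(ch).ShouldNot(gomega.Receive())                // nothing else arrives
}

RegisterTestingT still works but shares one global fail handler, which is why the per-test WithT value is preferred when tests run in parallel.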
+func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { timeoutInterval := defaultConsistentlyDuration pollingInterval := defaultConsistentlyPollingInterval if len(intervals) > 0 { diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go index ebdd71786d..bef00ae21d 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -29,5 +29,5 @@ func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message } func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return fmt.Sprintf("Expected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "not to have occurred") + return fmt.Sprintf("Unexpected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "occurred") } diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go index 3b412ce818..5c815f5af7 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go @@ -70,7 +70,7 @@ func parseXmlContent(content string) (*xmlNode, error) { if err == io.EOF { break } - return nil, fmt.Errorf("failed to decode next token: %v", err) + return nil, fmt.Errorf("failed to decode next token: %v", err) // untested section } lastNodeIndex := len(allNodes) - 1 @@ -94,7 +94,7 @@ func parseXmlContent(content string) (*xmlNode, error) { case xml.CharData: lastNode.Content = append(lastNode.Content, tok.Copy()...) case xml.Comment: - lastNode.Comments = append(lastNode.Comments, tok.Copy()) + lastNode.Comments = append(lastNode.Comments, tok.Copy()) // untested section case xml.ProcInst: lastNode.ProcInsts = append(lastNode.ProcInsts, tok.Copy()) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go deleted file mode 100644 index 288f0e8548..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.12 - -package prometheus - -import "runtime/debug" - -// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. 
-func readBuildInfo() (path, version, sum string) { - path, version, sum = "unknown", "unknown", "unknown" - if bi, ok := debug.ReadBuildInfo(); ok { - path = bi.Main.Path - version = bi.Main.Version - sum = bi.Main.Sum - } - return -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go deleted file mode 100644 index 6609e2877c..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.12 - -package prometheus - -// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before -// 1.12. Remove this whole file once the minimum supported Go version is 1.12. -func readBuildInfo() (path, version, sum string) { - return "unknown", "unknown", "unknown" -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index 1e839650d4..c0d70b2faf 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -79,7 +79,7 @@ type Collector interface { // of the Describe method. If a Collector sometimes collects no metrics at all // (for example vectors like CounterVec, GaugeVec, etc., which only collect // metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collector (cf. the Register +// it might even get registered as an unchecked Collecter (cf. the Register // method of the Registerer interface). Hence, only use this shortcut // implementation of Describe if you are certain to fulfill the contract. // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 1e0d578ee7..5d9525defc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -122,13 +122,13 @@ // the Collect method. The Describe method has to return separate Desc // instances, representative of the “throw-away” metrics to be created later. // NewDesc comes in handy to create those Desc instances. Alternatively, you -// could return no Desc at all, which will mark the Collector “unchecked”. No -// checks are performed at registration time, but metric consistency will still +// could return no Desc at all, which will marke the Collector “unchecked”. No +// checks are porformed at registration time, but metric consistency will still // be ensured at scrape time, i.e. any inconsistencies will lead to scrape // errors. 
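The doc.go hunk above concerns hand-rolled Collectors whose metrics are created at scrape time. A hedged sketch of such a collector follows; queueCollector and its queue-length source are assumptions made up for this illustration, not code from this diff.

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector builds throw-away metrics inside Collect, as described above.
type queueCollector struct {
    lenDesc  *prometheus.Desc
    queueLen func() float64 // hypothetical source of the current queue length
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
    // Returning the Desc keeps the collector "checked"; sending nothing here
    // would instead register it as an unchecked Collector.
    ch <- c.lenDesc
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
    ch <- prometheus.MustNewConstMetric(c.lenDesc, prometheus.GaugeValue, c.queueLen())
}

func main() {
    c := &queueCollector{
        lenDesc:  prometheus.NewDesc("example_queue_length", "Current queue length.", nil, nil),
        queueLen: func() float64 { return 0 },
    }
    prometheus.MustRegister(c)
}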
Thus, with unchecked Collectors, the responsibility to not collect // metrics that lead to inconsistencies in the total scrape result lies with the // implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situation where the exact +// sometimes necessary. The typical use case is a situatios where the exact // metrics to be returned by a Collector cannot be predicted at registration // time, but the implementer has sufficient knowledge of the whole system to // guarantee metric consistency. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index dc9247fed9..ba3b9333ed 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -14,9 +14,9 @@ package prometheus import ( + "fmt" "runtime" "runtime/debug" - "sync" "time" ) @@ -26,41 +26,16 @@ type goCollector struct { gcDesc *Desc goInfoDesc *Desc - // ms... are memstats related. - msLast *runtime.MemStats // Previously collected memstats. - msLastTimestamp time.Time - msMtx sync.Mutex // Protects msLast and msLastTimestamp. - msMetrics memStatsMetrics - msRead func(*runtime.MemStats) // For mocking in tests. - msMaxWait time.Duration // Wait time for fresh memstats. - msMaxAge time.Duration // Maximum allowed age of old memstats. + // metrics to describe and collect + metrics memStatsMetrics } -// NewGoCollector returns a collector that exports metrics about the current Go +// NewGoCollector returns a collector which exports metrics about the current Go // process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. This requires to “stop the world”, which usually only happens for -// garbage collection (GC). Take the following implications into account when -// deciding whether to use the Go collector: -// -// 1. The performance impact of stopping the world is the more relevant the more -// frequently metrics are collected. However, with Go1.9 or later the -// stop-the-world time per metrics collection is very short (~25µs) so that the -// performance impact will only matter in rare cases. However, with older Go -// versions, the stop-the-world duration depends on the heap size and can be -// quite significant (~1.7 ms/GiB as per +// is called. This causes a stop-the-world, which is very short with Go1.9+ +// (~25µs). However, with older Go versions, the stop-the-world duration depends +// on the heap size and can be quite significant (~1.7 ms/GiB as per // https://go-review.googlesource.com/c/go/+/34937). -// -// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the -// metrics collection happens to coincide with GC, it will only complete after -// GC has finished. Usually, GC is fast enough to not cause problems. However, -// with a very large heap, GC might take multiple seconds, which is enough to -// cause scrape timeouts in common setups. To avoid this problem, the Go -// collector will use the memstats from a previous collection if -// runtime.ReadMemStats takes more than 1s. However, if there are no previously -// collected memstats, or their collection is more than 5m ago, the collection -// will block until runtime.ReadMemStats succeeds. (The problem might be solved -// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go -// issue.) 
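The comment removed above describes a read-with-fallback scheme: use the fresh runtime.ReadMemStats result if it arrives within roughly 1s, fall back to a cached copy younger than roughly 5m, and otherwise block for the in-flight read. Below is a simplified, hedged sketch of that pattern in isolation, not the collector's actual code.

package main

import (
    "runtime"
    "sync"
    "time"
)

type memStatsCache struct {
    mtx  sync.Mutex
    last *runtime.MemStats
    when time.Time
}

// read returns fresh memstats if they arrive within maxWait, a cached copy if
// it is younger than maxAge, and otherwise blocks for the in-flight read.
func (c *memStatsCache) read(maxWait, maxAge time.Duration) *runtime.MemStats {
    ms := &runtime.MemStats{}
    done := make(chan struct{})
    go func() {
        runtime.ReadMemStats(ms) // may stop the world for a long time during GC
        c.mtx.Lock()
        c.last, c.when = ms, time.Now()
        c.mtx.Unlock()
        close(done)
    }()

    select {
    case <-done:
        return ms
    case <-time.After(maxWait):
    }
    c.mtx.Lock()
    last, when := c.last, c.when
    c.mtx.Unlock()
    if last != nil && time.Since(when) < maxAge {
        return last // recent enough cached value
    }
    <-done // cache too old or empty: wait for the slow read after all
    return ms
}

func main() {
    var c memStatsCache
    _ = c.read(time.Second, 5*time.Minute)
}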
func NewGoCollector() Collector { return &goCollector{ goroutinesDesc: NewDesc( @@ -79,11 +54,7 @@ func NewGoCollector() Collector { "go_info", "Information about the Go environment.", nil, Labels{"version": runtime.Version()}), - msLast: &runtime.MemStats{}, - msRead: runtime.ReadMemStats, - msMaxWait: time.Second, - msMaxAge: 5 * time.Minute, - msMetrics: memStatsMetrics{ + metrics: memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), @@ -282,7 +253,7 @@ func NewGoCollector() Collector { } func memstatNamespace(s string) string { - return "go_memstats_" + s + return fmt.Sprintf("go_memstats_%s", s) } // Describe returns all descriptions of the collector. @@ -291,27 +262,13 @@ func (c *goCollector) Describe(ch chan<- *Desc) { ch <- c.threadsDesc ch <- c.gcDesc ch <- c.goInfoDesc - for _, i := range c.msMetrics { + for _, i := range c.metrics { ch <- i.desc } } // Collect returns the current state of all metrics of the collector. func (c *goCollector) Collect(ch chan<- Metric) { - var ( - ms = &runtime.MemStats{} - done = make(chan struct{}) - ) - // Start reading memstats first as it might take a while. - go func() { - c.msRead(ms) - c.msMtx.Lock() - c.msLast = ms - c.msLastTimestamp = time.Now() - c.msMtx.Unlock() - close(done) - }() - ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) n, _ := runtime.ThreadCreateProfile(nil) ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) @@ -329,31 +286,9 @@ func (c *goCollector) Collect(ch chan<- Metric) { ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) - timer := time.NewTimer(c.msMaxWait) - select { - case <-done: // Our own ReadMemStats succeeded in time. Use it. - timer.Stop() // Important for high collection frequencies to not pile up timers. - c.msCollect(ch, ms) - return - case <-timer.C: // Time out, use last memstats if possible. Continue below. - } - c.msMtx.Lock() - if time.Since(c.msLastTimestamp) < c.msMaxAge { - // Last memstats are recent enough. Collect from them under the lock. - c.msCollect(ch, c.msLast) - c.msMtx.Unlock() - return - } - // If we are here, the last memstats are too old or don't exist. We have - // to wait until our own ReadMemStats finally completes. For that to - // happen, we have to release the lock. - c.msMtx.Unlock() - <-done - c.msCollect(ch, ms) -} - -func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { - for _, i := range c.msMetrics { + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) } } @@ -364,33 +299,3 @@ type memStatsMetrics []struct { eval func(*runtime.MemStats) float64 valType ValueType } - -// NewBuildInfoCollector returns a collector collecting a single metric -// "go_build_info" with the constant value 1 and three labels "path", "version", -// and "checksum". Their label values contain the main module path, version, and -// checksum, respectively. The labels will only have meaningful values if the -// binary is built with Go module support and from source code retrieved from -// the source repository (rather than the local file system). This is usually -// accomplished by building from outside of GOPATH, specifying the full address -// of the main package, e.g. "GO111MODULE=on go run -// github.com/prometheus/client_golang/examples/random". If built without Go -// module support, all label values will be "unknown". 
If built with Go module -// support but using the source code from the local file system, the "path" will -// be set appropriately, but "checksum" will be empty and "version" will be -// "(devel)". -// -// This collector uses only the build information for the main module. See -// https://github.com/povilasv/prommod for an example of a collector for the -// module dependencies. -func NewBuildInfoCollector() Collector { - path, version, sum := readBuildInfo() - c := &selfCollector{MustNewConstMetric( - NewDesc( - "go_build_info", - "Build information about the main Go module.", - nil, Labels{"path": path, "version": version, "checksum": sum}, - ), - GaugeValue, 1)} - c.init(c.self) - return c -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index d7ea67bd2b..f88da707bc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -204,8 +204,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } } - // Finally we know the final length of h.upperBounds and can make buckets - // for both counts: + // Finally we know the final length of h.upperBounds and can make counts + // for both states: h.counts[0].buckets = make([]uint64, len(h.upperBounds)) h.counts[1].buckets = make([]uint64, len(h.upperBounds)) @@ -224,21 +224,18 @@ type histogramCounts struct { } type histogram struct { - // countAndHotIdx enables lock-free writes with use of atomic updates. - // The most significant bit is the hot index [0 or 1] of the count field - // below. Observe calls update the hot one. All remaining bits count the - // number of Observe calls. Observe starts by incrementing this counter, - // and finish by incrementing the count field in the respective - // histogramCounts, as a marker for completion. + // countAndHotIdx is a complicated one. For lock-free yet atomic + // observations, we need to save the total count of observations again, + // combined with the index of the currently-hot counts struct, so that + // we can perform the operation on both values atomically. The least + // significant bit defines the hot counts struct. The remaining 63 bits + // represent the total count of observations. This happens under the + // assumption that the 63bit count will never overflow. Rationale: An + // observations takes about 30ns. Let's assume it could happen in + // 10ns. Overflowing the counter will then take at least (2^63)*10ns, + // which is about 3000 years. // - // Calls of the Write method (which are non-mutating reads from the - // perspective of the histogram) swap the hot–cold under the writeMtx - // lock. A cooldown is awaited (while locked) by comparing the number of - // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must - // be merged into the new hot before releasing writeMtx. - // - // Fields with atomic access first! See alignment constraint: + // This has to be first in the struct for 64bit alignment. See // http://golang.org/pkg/sync/atomic/#pkg-note-BUG countAndHotIdx uint64 @@ -246,14 +243,16 @@ type histogram struct { desc *Desc writeMtx sync.Mutex // Only used in the Write method. + upperBounds []float64 + // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. 
It has to be an array of // pointers to guarantee 64bit alignment of the histogramCounts, see // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts + hotIdx int // Index of currently-hot counts. Only used within Write. - upperBounds []float64 - labelPairs []*dto.LabelPair + labelPairs []*dto.LabelPair } func (h *histogram) Desc() *Desc { @@ -272,11 +271,11 @@ func (h *histogram) Observe(v float64) { // 300 buckets: 154 ns/op linear - binary 61.6 ns/op i := sort.SearchFloat64s(h.upperBounds, v) - // We increment h.countAndHotIdx so that the counter in the lower - // 63 bits gets incremented. At the same time, we get the new value + // We increment h.countAndHotIdx by 2 so that the counter in the upper + // 63 bits gets incremented by 1. At the same time, we get the new value // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 1) - hotCounts := h.counts[n>>63] + n := atomic.AddUint64(&h.countAndHotIdx, 2) + hotCounts := h.counts[n%2] if i < len(h.upperBounds) { atomic.AddUint64(&hotCounts.buckets[i], 1) @@ -294,43 +293,72 @@ func (h *histogram) Observe(v float64) { } func (h *histogram) Write(out *dto.Metric) error { - // For simplicity, we protect this whole method by a mutex. It is not in - // the hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it, if possible at - // all. + var ( + his = &dto.Histogram{} + buckets = make([]*dto.Bucket, len(h.upperBounds)) + hotCounts, coldCounts *histogramCounts + count uint64 + ) + + // For simplicity, we mutex the rest of this method. It is not in the + // hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it. h.writeMtx.Lock() defer h.writeMtx.Unlock() - // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) - // without touching the count bits. See the struct comments for a full - // description of the algorithm. - n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - // count is contained unchanged in the lower 63 bits. - count := n & ((1 << 63) - 1) - // The most significant bit tells us which counts is hot. The complement - // is thus the cold one. - hotCounts := h.counts[n>>63] - coldCounts := h.counts[(^n)>>63] - - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. + // This is a bit arcane, which is why the following spells out this if + // clause in English: + // + // If the currently-hot counts struct is #0, we atomically increment + // h.countAndHotIdx by 1 so that from now on Observe will use the counts + // struct #1. Furthermore, the atomic increment gives us the new value, + // which, in its most significant 63 bits, tells us the count of + // observations done so far up to and including currently ongoing + // observations still using the counts struct just changed from hot to + // cold. To have a normal uint64 for the count, we bitshift by 1 and + // save the result in count. We also set h.hotIdx to 1 for the next + // Write call, and we will refer to counts #1 as hotCounts and to counts + // #0 as coldCounts. + // + // If the currently-hot counts struct is #1, we do the corresponding + // things the other way round. We have to _decrement_ h.countAndHotIdx + // (which is a bit arcane in itself, as we have to express -1 with an + // unsigned int...). 
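The restored comment above packs a hot-slot selector and an observation count into a single uint64 so that Observe stays lock-free. Below is a stripped-down, hedged illustration of the same bit layout; switchCounter is invented for the sketch and assumes a single caller of swap, mirroring how the real Write method is serialized by a mutex.

package main

import (
    "fmt"
    "runtime"
    "sync/atomic"
)

type switchCounter struct {
    state uint64    // bit 0: index of the hot slot; bits 1..63: started observations
    done  [2]uint64 // completed observations per slot
}

// observe records into whichever slot is currently hot, without locking.
func (s *switchCounter) observe() {
    n := atomic.AddUint64(&s.state, 2) // count one observation, read the hot bit
    hot := n & 1
    // ... update per-slot data for slot `hot` here ...
    atomic.AddUint64(&s.done[hot], 1) // mark the observation as finished
}

// swap flips the hot bit, then waits until the now-cold slot has caught up.
func (s *switchCounter) swap() (coldSlot, started uint64) {
    var n uint64
    if atomic.LoadUint64(&s.state)&1 == 0 {
        n = atomic.AddUint64(&s.state, 1) // hot becomes slot 1
        coldSlot = 0
    } else {
        n = atomic.AddUint64(&s.state, ^uint64(0)) // hot becomes slot 0
        coldSlot = 1
    }
    started = n >> 1
    for atomic.LoadUint64(&s.done[coldSlot]) != started {
        runtime.Gosched() // let in-flight observations finish
    }
    return coldSlot, started
}

func main() {
    var s switchCounter
    s.observe()
    s.observe()
    fmt.Println(s.swap()) // 0 2
}

Flipping the low bit with +1 or with +^uint64(0) leaves the upper 63 count bits untouched, which is why the writer can recover the exact number of started observations from the same word.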
+ if h.hotIdx == 0 { + count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1 + h.hotIdx = 1 + hotCounts = h.counts[1] + coldCounts = h.counts[0] + } else { + count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement. + h.hotIdx = 0 + hotCounts = h.counts[0] + coldCounts = h.counts[1] } - his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + // Now we have to wait for the now-declared-cold counts to actually cool + // down, i.e. wait for all observations still using it to finish. That's + // the case once the count in the cold counts struct is the same as the + // one atomically retrieved from the upper 63bits of h.countAndHotIdx. + for { + if count == atomic.LoadUint64(&coldCounts.count) { + break + } + runtime.Gosched() // Let observations get work done. } + + his.SampleCount = proto.Uint64(count) + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))) var cumCount uint64 for i, upperBound := range h.upperBounds { cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) - his.Bucket[i] = &dto.Bucket{ + buckets[i] = &dto.Bucket{ CumulativeCount: proto.Uint64(cumCount), UpperBound: proto.Float64(upperBound), } } + his.Bucket = buckets out.Histogram = his out.Label = h.labelPairs diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go index 19a3e8f493..9f0875bfc8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -34,6 +34,7 @@ import ( const ( contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" contentEncodingHeader = "Content-Encoding" acceptEncodingHeader = "Accept-Encoding" ) @@ -330,8 +331,6 @@ type fancyResponseWriterDelegator struct { } func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to - //remove support from client_golang yet. return f.ResponseWriter.(http.CloseNotifier).CloseNotify() } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 37d2026ac4..55176d58ce 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -126,7 +126,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { } // Set up process metric collection if supported by the runtime. 
- if _, err := procfs.NewDefaultFS(); err == nil { + if _, err := procfs.NewStat(); err == nil { c.collectFn = c.processCollect } else { c.collectFn = func(ch chan<- Metric) { @@ -166,7 +166,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) { return } - if stat, err := p.Stat(); err == nil { + if stat, err := p.NewStat(); err == nil { ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) @@ -185,7 +185,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) { c.reportError(ch, c.openFDs, err) } - if limits, err := p.Limits(); err == nil { + if limits, err := p.NewLimits(); err == nil { ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) } else { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index fa535684f9..67b56d37cf 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -38,6 +38,7 @@ type delegator interface { type responseWriterDelegator struct { http.ResponseWriter + handler, method string status int written int64 wroteHeader bool @@ -74,11 +75,8 @@ type closeNotifierDelegator struct{ *responseWriterDelegator } type flusherDelegator struct{ *responseWriterDelegator } type hijackerDelegator struct{ *responseWriterDelegator } type readerFromDelegator struct{ *responseWriterDelegator } -type pusherDelegator struct{ *responseWriterDelegator } func (d closeNotifierDelegator) CloseNotify() <-chan bool { - //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to - //remove support from client_golang yet. 
return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } func (d flusherDelegator) Flush() { @@ -95,9 +93,6 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { d.written += n return n, err } -func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { - return d.ResponseWriter.(http.Pusher).Push(target, opts) -} var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) @@ -201,157 +196,4 @@ func init() { http.CloseNotifier }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } - pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 - return pusherDelegator{d} - } - pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 - return struct { - *responseWriterDelegator - http.Pusher - http.CloseNotifier - }{d, pusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - }{d, pusherDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - }{d, pusherDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - }{d, pusherDelegator{d}, readerFromDelegator{d}} - } - pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - 
} - pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to - //remove support from client_golang yet. - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - if _, ok := w.(http.Pusher); ok { - id += pusher - } - - return pickDelegator[id](d) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go new file mode 100644 index 0000000000..31a7069569 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go @@ -0,0 +1,181 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +type pusherDelegator struct{ *responseWriterDelegator } + +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +func init() { + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + 
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go new file mode 100644 index 0000000000..8bb9b8b68f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
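Both delegator files in this diff select a wrapper by OR-ing one bit per optional interface the ResponseWriter implements and indexing a pre-built table. The following is a hedged, minimal sketch of that capability-bitmask pattern with just two capabilities; the names and output are illustrative only.

package main

import (
    "fmt"
    "io"
    "net/http"
    "net/http/httptest"
)

const (
    flusher    = 1 << iota // ResponseWriter also implements http.Flusher
    readerFrom             // ResponseWriter also implements io.ReaderFrom
)

// pick maps a capability bitmask to a description; the real code maps it to a
// pre-built wrapper type embedding exactly the detected interfaces.
var pick = [4]string{
    0:                    "plain ResponseWriter",
    flusher:              "+ Flusher",
    readerFrom:           "+ ReaderFrom",
    flusher | readerFrom: "+ Flusher + ReaderFrom",
}

func capabilities(w http.ResponseWriter) int {
    id := 0
    if _, ok := w.(http.Flusher); ok {
        id += flusher
    }
    if _, ok := w.(io.ReaderFrom); ok {
        id += readerFrom
    }
    return id
}

func main() {
    w := httptest.NewRecorder() // implements Flusher but not ReaderFrom
    fmt.Println(pick[capabilities(w)])
}

The real table has 32 entries because five capabilities are tracked: CloseNotifier, Flusher, Hijacker, ReaderFrom, and, on Go 1.8+, Pusher.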
+ +// +build !go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index cea5a90fd9..668eb6b3c9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -47,6 +47,7 @@ import ( const ( contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" contentEncodingHeader = "Content-Encoding" acceptEncodingHeader = "Accept-Encoding" ) @@ -84,32 +85,10 @@ func Handler() http.Handler { // instrumentation. Use the InstrumentMetricHandler function to apply the same // kind of instrumentation as it is used by the Handler function. func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - var ( - inFlightSem chan struct{} - errCnt = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "promhttp_metric_handler_errors_total", - Help: "Total number of internal errors encountered by the promhttp metric handler.", - }, - []string{"cause"}, - ) - ) - + var inFlightSem chan struct{} if opts.MaxRequestsInFlight > 0 { inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) } - if opts.Registry != nil { - // Initialize all possibilites that can occur below. - errCnt.WithLabelValues("gathering") - errCnt.WithLabelValues("encoding") - if err := opts.Registry.Register(errCnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - errCnt = are.ExistingCollector.(*prometheus.CounterVec) - } else { - panic(err) - } - } - } h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if inFlightSem != nil { @@ -128,7 +107,6 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { if opts.ErrorLog != nil { opts.ErrorLog.Println("error gathering metrics:", err) } - errCnt.WithLabelValues("gathering").Inc() switch opts.ErrorHandling { case PanicOnError: panic(err) @@ -169,7 +147,6 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { if opts.ErrorLog != nil { opts.ErrorLog.Println("error encoding and sending metric family:", err) } - errCnt.WithLabelValues("encoding").Inc() switch opts.ErrorHandling { case PanicOnError: panic(err) @@ -260,12 +237,9 @@ const ( // Ignore errors and try to serve as many metrics as possible. However, // if no metrics can be served, serve an HTTP status code 500 and the // last error message in the body. Only use this in deliberate "best - // effort" metrics collection scenarios. In this case, it is highly - // recommended to provide other means of detecting errors: By setting an - // ErrorLog in HandlerOpts, the errors are logged. By providing a - // Registry in HandlerOpts, the exposed metrics include an error counter - // "promhttp_metric_handler_errors_total", which can be used for - // alerts. + // effort" metrics collection scenarios. 
It is recommended to at least + // log errors (by providing an ErrorLog in HandlerOpts) to not mask + // errors completely. ContinueOnError // Panic upon the first error encountered (useful for "crash only" apps). PanicOnError @@ -288,18 +262,6 @@ type HandlerOpts struct { // logged regardless of the configured ErrorHandling provided ErrorLog // is not nil. ErrorHandling HandlerErrorHandling - // If Registry is not nil, it is used to register a metric - // "promhttp_metric_handler_errors_total", partitioned by "cause". A - // failed registration causes a panic. Note that this error counter is - // different from the instrumentation you get from the various - // InstrumentHandler... helpers. It counts errors that don't necessarily - // result in a non-2xx HTTP status code. There are two typical cases: - // (1) Encoding errors that only happen after streaming of the HTTP body - // has already started (and the status code 200 has been sent). This - // should only happen with custom collectors. (2) Collection errors with - // no effect on the HTTP status code because ErrorHandling is set to - // ContinueOnError. - Registry prometheus.Registerer // If DisableCompression is true, the handler will never compress the // response, even if requested by the client. DisableCompression bool diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 83c49b66a8..86fd564470 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -14,9 +14,7 @@ package promhttp import ( - "crypto/tls" "net/http" - "net/http/httptrace" "time" "github.com/prometheus/client_golang/prometheus" @@ -97,123 +95,3 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT return resp, err }) } - -// InstrumentTrace is used to offer flexibility in instrumenting the available -// httptrace.ClientTrace hook functions. Each function is passed a float64 -// representing the time in seconds since the start of the http request. A user -// may choose to use separately buckets Histograms, or implement custom -// instance labels on a per function basis. -type InstrumentTrace struct { - GotConn func(float64) - PutIdleConn func(float64) - GotFirstResponseByte func(float64) - Got100Continue func(float64) - DNSStart func(float64) - DNSDone func(float64) - ConnectStart func(float64) - ConnectDone func(float64) - TLSHandshakeStart func(float64) - TLSHandshakeDone func(float64) - WroteHeaders func(float64) - Wait100Continue func(float64) - WroteRequest func(float64) -} - -// InstrumentRoundTripperTrace is a middleware that wraps the provided -// RoundTripper and reports times to hook functions provided in the -// InstrumentTrace struct. Hook functions that are not present in the provided -// InstrumentTrace struct are ignored. Times reported to the hook functions are -// time since the start of the request. Only with Go1.9+, those times are -// guaranteed to never be negative. (Earlier Go versions are not using a -// monotonic clock.) Note that partitioning of Histograms is expensive and -// should be used judiciously. -// -// For hook functions that receive an error as an argument, no observations are -// made in the event of a non-nil error value. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
-func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - - trace := &httptrace.ClientTrace{ - GotConn: func(_ httptrace.GotConnInfo) { - if it.GotConn != nil { - it.GotConn(time.Since(start).Seconds()) - } - }, - PutIdleConn: func(err error) { - if err != nil { - return - } - if it.PutIdleConn != nil { - it.PutIdleConn(time.Since(start).Seconds()) - } - }, - DNSStart: func(_ httptrace.DNSStartInfo) { - if it.DNSStart != nil { - it.DNSStart(time.Since(start).Seconds()) - } - }, - DNSDone: func(_ httptrace.DNSDoneInfo) { - if it.DNSDone != nil { - it.DNSDone(time.Since(start).Seconds()) - } - }, - ConnectStart: func(_, _ string) { - if it.ConnectStart != nil { - it.ConnectStart(time.Since(start).Seconds()) - } - }, - ConnectDone: func(_, _ string, err error) { - if err != nil { - return - } - if it.ConnectDone != nil { - it.ConnectDone(time.Since(start).Seconds()) - } - }, - GotFirstResponseByte: func() { - if it.GotFirstResponseByte != nil { - it.GotFirstResponseByte(time.Since(start).Seconds()) - } - }, - Got100Continue: func() { - if it.Got100Continue != nil { - it.Got100Continue(time.Since(start).Seconds()) - } - }, - TLSHandshakeStart: func() { - if it.TLSHandshakeStart != nil { - it.TLSHandshakeStart(time.Since(start).Seconds()) - } - }, - TLSHandshakeDone: func(_ tls.ConnectionState, err error) { - if err != nil { - return - } - if it.TLSHandshakeDone != nil { - it.TLSHandshakeDone(time.Since(start).Seconds()) - } - }, - WroteHeaders: func() { - if it.WroteHeaders != nil { - it.WroteHeaders(time.Since(start).Seconds()) - } - }, - Wait100Continue: func() { - if it.Wait100Continue != nil { - it.Wait100Continue(time.Since(start).Seconds()) - } - }, - WroteRequest: func(_ httptrace.WroteRequestInfo) { - if it.WroteRequest != nil { - it.WroteRequest(time.Since(start).Seconds()) - } - }, - } - r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) - - return next.RoundTrip(r) - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go new file mode 100644 index 0000000000..a034d1ec0f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go @@ -0,0 +1,144 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package promhttp + +import ( + "context" + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" +) + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. 
+type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. (Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index f2fb67aeeb..b5e70b93fa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ 
b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -680,7 +680,7 @@ func processMetric( // Gatherers is a slice of Gatherer instances that implements the Gatherer // interface itself. Its Gather method calls Gather on all Gatherers in the // slice in order and returns the merged results. Errors returned from the -// Gather calls are all returned in a flattened MultiError. Duplicate and +// Gather calles are all returned in a flattened MultiError. Duplicate and // inconsistent Metrics are skipped (first occurrence in slice order wins) and // reported in the returned error. // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index ec663ec3d4..2980614dff 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -16,10 +16,8 @@ package prometheus import ( "fmt" "math" - "runtime" "sort" "sync" - "sync/atomic" "time" "github.com/beorn7/perks/quantile" @@ -39,7 +37,7 @@ const quantileLabel = "quantile" // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency // as rank estimations. However, the default behavior will change in the -// upcoming v1.0.0 of the library. There will be no rank estimations at all by +// upcoming v0.10 of the library. There will be no rank estimations at all by // default. For a sane transition, it is recommended to set the desired rank // estimations explicitly. // @@ -61,7 +59,7 @@ type Summary interface { // DefObjectives are the default Summary quantile values. // // Deprecated: DefObjectives will not be used as the default objectives in -// v1.0.0 of the library. The default Summary will have no quantiles then. +// v0.10 of the library. The default Summary will have no quantiles then. var ( DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} @@ -86,7 +84,7 @@ const ( // mandatory to set Name to a non-empty string. While all other fields are // optional and can safely be left at their zero value, it is recommended to set // a help string and to explicitly set the Objectives field to the desired value -// as the default value will change in the upcoming v1.0.0 of the library. +// as the default value will change in the upcoming v0.10 of the library. type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with @@ -127,10 +125,9 @@ type SummaryOpts struct { // its zero value (i.e. nil). To create a Summary without Objectives, // set it to an empty map (i.e. map[float64]float64{}). // - // Note that the current value of DefObjectives is deprecated. It will - // be replaced by an empty map in v1.0.0 of the library. Please - // explicitly set Objectives to the desired value to avoid problems - // during the transition. + // Deprecated: Note that the current value of DefObjectives is + // deprecated. It will be replaced by an empty map in v0.10 of the + // library. Please explicitly set Objectives to the desired value. Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant @@ -154,7 +151,7 @@ type SummaryOpts struct { BufCap uint32 } -// Problem with the sliding-window decay algorithm... The Merge method of +// Great fuck-up with the sliding-window decay algorithm... 
The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging // summaries in the first place. To avoid using Merge, we are currently adding @@ -217,17 +214,6 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } - if len(opts.Objectives) == 0 { - // Use the lock-free implementation of a Summary without objectives. - s := &noObjectivesSummary{ - desc: desc, - labelPairs: makeLabelPairs(desc, labelValues), - counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}}, - } - s.init(s) // Init self-collection. - return s - } - s := &summary{ desc: desc, @@ -396,116 +382,6 @@ func (s *summary) swapBufs(now time.Time) { } } -type summaryCounts struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 -} - -type noObjectivesSummary struct { - // countAndHotIdx enables lock-free writes with use of atomic updates. - // The most significant bit is the hot index [0 or 1] of the count field - // below. Observe calls update the hot one. All remaining bits count the - // number of Observe calls. Observe starts by incrementing this counter, - // and finish by incrementing the count field in the respective - // summaryCounts, as a marker for completion. - // - // Calls of the Write method (which are non-mutating reads from the - // perspective of the summary) swap the hot–cold under the writeMtx - // lock. A cooldown is awaited (while locked) by comparing the number of - // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must - // be merged into the new hot before releasing writeMtx. - - // Fields with atomic access first! See alignment constraint: - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. - - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*summaryCounts - - labelPairs []*dto.LabelPair -} - -func (s *noObjectivesSummary) Desc() *Desc { - return s.desc -} - -func (s *noObjectivesSummary) Observe(v float64) { - // We increment h.countAndHotIdx so that the counter in the lower - // 63 bits gets incremented. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&s.countAndHotIdx, 1) - hotCounts := s.counts[n>>63] - - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) -} - -func (s *noObjectivesSummary) Write(out *dto.Metric) error { - // For simplicity, we protect this whole method by a mutex. It is not in - // the hot path, i.e. Observe is called much more often than Write. 
The - // complication of making Write lock-free isn't worth it, if possible at - // all. - s.writeMtx.Lock() - defer s.writeMtx.Unlock() - - // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) - // without touching the count bits. See the struct comments for a full - // description of the algorithm. - n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) - // count is contained unchanged in the lower 63 bits. - count := n & ((1 << 63) - 1) - // The most significant bit tells us which counts is hot. The complement - // is thus the cold one. - hotCounts := s.counts[n>>63] - coldCounts := s.counts[(^n)>>63] - - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } - - sum := &dto.Summary{ - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - } - - out.Summary = sum - out.Label = s.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - return nil -} - type quantSort []*dto.Quantile func (s quantSort) Len() int { diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 8e473d0fe9..16655d417c 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -436,11 +436,11 @@ func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (in func writeFloat(w enhancedWriter, f float64) (int, error) { switch { case f == 1: - return 1, w.WriteByte('1') + return w.WriteString("1.0") case f == 0: - return 1, w.WriteByte('0') + return w.WriteString("0.0") case f == -1: - return w.WriteString("-1") + return w.WriteString("-1.0") case math.IsNaN(f): return w.WriteString("NaN") case math.IsInf(f, +1): @@ -450,6 +450,12 @@ func writeFloat(w enhancedWriter, f float64) (int, error) { default: bp := numBufPool.Get().(*[]byte) *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + // Add a .0 if used fixed point and there is no decimal + // point already. This is for future proofing with OpenMetrics, + // where floats always contain either an exponent or decimal. + if !bytes.ContainsAny(*bp, "e.") { + *bp = append(*bp, '.', '0') + } written, err := w.Write(*bp) numBufPool.Put(bp) return written, err diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 7b0064fdb2..46259b1f10 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -150,13 +150,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { return err } - // If the value was something like -0.1 the negative is lost in the - // parsing because of the leading zero, this ensures that we capture it. 
- if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { - *t = Time(v+va) * -1 - } else { - *t = Time(v + va) - } + *t = Time(v + va) default: return fmt.Errorf("invalid time %q", string(b)) diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml deleted file mode 100644 index 438ca92eca..0000000000 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ /dev/null @@ -1,6 +0,0 @@ -# Run only staticcheck for now. Additional linters will be enabled one-by-one. -linters: - enable: - - staticcheck - - govet - disable-all: true diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md index 56ba67d3e3..f1d3b9937b 100644 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -1,2 +1,2 @@ +* Tobias Schmidt @grobie * Johannes 'fish' Ziemke @discordianfish -* Paul Gier @pgier diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index 616a0d25eb..947d7d8fa7 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -14,16 +14,17 @@ include Makefile.common %/.unpacked: %.ttar - @echo ">> extracting fixtures" ./ttar -C $(dir $*) -x -f $*.ttar touch $@ -update_fixtures: - rm -vf fixtures/.unpacked - ./ttar -c -f fixtures.ttar fixtures/ +update_fixtures: fixtures.ttar sysfs/fixtures.ttar + +%fixtures.ttar: %/fixtures + rm -v $(dir $*)fixtures/.unpacked + ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ .PHONY: build build: .PHONY: test -test: fixtures/.unpacked common-test +test: fixtures/.unpacked sysfs/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index c7f9ea64ff..741579e60f 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -29,15 +29,12 @@ GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) GOOPTS ?= -GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) -GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') -GOVENDOR := -GO111MODULE := +unexport GOVENDOR ifeq (, $(PRE_GO_111)) ifneq (,$(wildcard go.mod)) # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). @@ -58,57 +55,32 @@ $(warning Some recipes may not work as expected as the current Go runtime is '$( # This repository isn't using Go modules (yet). GOVENDOR := $(FIRST_GOPATH)/bin/govendor endif + + unexport GO111MODULE endif PROMU := $(FIRST_GOPATH)/bin/promu +STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck pkgs = ./... 
-ifeq (arm, $(GOHOSTARCH)) - GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) -else - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) -endif +GO_VERSION ?= $(shell $(GO) version) +GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION))) -PROMU_VERSION ?= 0.4.0 +PROMU_VERSION ?= 0.2.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz -GOLANGCI_LINT := -GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.16.0 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. -# windows isn't included here because of the path separator being different. -ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - endif -endif - PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKER_REPO ?= prom -DOCKER_ARCHS ?= amd64 - -BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) -PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) -TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) - -ifeq ($(GOHOSTARCH),amd64) - ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) - # Only supported on amd64 - test-flags := -race - endif -endif +.PHONY: all +all: precheck style staticcheck unused build test # This rule is used to forward a target like "build" to "common-build". This # allows a new "build" target to be defined in a Makefile which includes this # one and override "common-build" without override warnings. %: common-% ; -.PHONY: common-all -common-all: precheck style check_license lint unused build test - .PHONY: common-style common-style: @echo ">> checking code style" @@ -130,15 +102,6 @@ common-check_license: exit 1; \ fi -.PHONY: common-deps -common-deps: - @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... -endif - .PHONY: common-test-short common-test-short: @echo ">> running short tests" @@ -147,35 +110,26 @@ common-test-short: .PHONY: common-test common-test: @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs) .PHONY: common-format common-format: @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) -.PHONY: common-lint -common-lint: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint" +.PHONY: common-staticcheck +common-staticcheck: $(STATICCHECK) + @echo ">> running staticcheck" ifdef GO111MODULE -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. - GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... 
> /dev/null - GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) + GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs) else - $(GOLANGCI_LINT) run $(pkgs) + $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) endif -endif - -# For backward-compatibility. -.PHONY: common-staticcheck -common-staticcheck: lint .PHONY: common-unused common-unused: $(GOVENDOR) @@ -186,9 +140,8 @@ else ifdef GO111MODULE @echo ">> running check for unused/missing packages in go.mod" GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) @git diff --exit-code -- go.sum go.mod -else +ifneq (,$(wildcard vendor)) @echo ">> running check for unused packages in vendor/" GO111MODULE=$(GO111MODULE) $(GO) mod vendor @git diff --exit-code -- go.sum go.mod vendor/ @@ -206,48 +159,45 @@ common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) -.PHONY: common-docker $(BUILD_DOCKER_ARCHS) -common-docker: $(BUILD_DOCKER_ARCHS) -$(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ - --build-arg ARCH="$*" \ - --build-arg OS="linux" \ - . - -.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) -common-docker-publish: $(PUBLISH_DOCKER_ARCHS) -$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" - -.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) -common-docker-tag-latest: $(TAG_DOCKER_ARCHS) -$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - -.PHONY: common-docker-manifest -common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" +.PHONY: common-docker +common-docker: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . + +.PHONY: common-docker-publish +common-docker-publish: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" + +.PHONY: common-docker-tag-latest +common-docker-tag-latest: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest" .PHONY: promu promu: $(PROMU) $(PROMU): - $(eval PROMU_TMP := $(shell mktemp -d)) - curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) - mkdir -p $(FIRST_GOPATH)/bin - cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu - rm -r $(PROMU_TMP) + curl -s -L $(PROMU_URL) | tar -xvz -C /tmp + mkdir -v -p $(FIRST_GOPATH)/bin + cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU) .PHONY: proto proto: @echo ">> generating code from proto files" @./scripts/genproto.sh -ifdef GOLANGCI_LINT -$(GOLANGCI_LINT): - mkdir -p $(FIRST_GOPATH)/bin - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) +.PHONY: $(STATICCHECK) +$(STATICCHECK): +ifdef GO111MODULE +# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}. +# See https://github.com/golang/go/issues/27643. 
+# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules. + tmpModule=$$(mktemp -d 2>&1) && \ + mkdir -p $${tmpModule}/staticcheck && \ + cd "$${tmpModule}"/staticcheck && \ + GO111MODULE=on $(GO) mod init example.com/staticcheck && \ + GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \ + rm -rf $${tmpModule}; +else + GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck endif ifdef GOVENDOR @@ -262,6 +212,7 @@ precheck:: define PRECHECK_COMMAND_template = precheck:: $(1)_precheck + PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) .PHONY: $(1)_precheck $(1)_precheck: diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 6f8850feb6..2095494719 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -1,7 +1,7 @@ # procfs This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystems /proc and /sys. +metrics from the pseudo-filesystem proc. *WARNING*: This package is a work in progress. Its API may still break in backwards-incompatible ways without warnings. Use it at your own risk. @@ -9,45 +9,3 @@ backwards-incompatible ways without warnings. Use it at your own risk. [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) - -## Usage - -The procfs library is organized by packages based on whether the gathered data is coming from -/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, /sys, or both. For example, current cpu statistics are gathered from -`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount -point is initialized, and then the stat information is read. - -```go -fs, err := procfs.NewFS("/proc") -stats, err := fs.Stat() -``` - -Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. - -```go - fs, err := blockdevice.NewFS("/proc", "/sys") - stats, err := fs.ProcDiskstats() -``` - -## Building and Testing - -The procfs library is normally built as part of another application. However, when making -changes to the library, the `make test` command can be used to run the API test suite. - -### Updating Test Fixtures - -The procfs library includes a set of test fixtures which include many example files from -the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file -which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`. - -```bash -rm -rf fixtures -make test -``` - -Next, make the required changes to the extracted files in the `fixtures` directory. When -the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file -based on the updated `fixtures` directory. And finally, verify the changes using -`git diff fixtures.ttar`. 
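The buddyinfo.go hunk below reverts procfs to its older constructor-style API: a package-level `NewBuddyInfo()` that opens `DefaultMountPoint` itself, and an `FS.NewBuddyInfo()` method (replacing `FS.BuddyInfo()`) for callers that already hold an `FS`. The following is a minimal sketch of how calling code would consume that reintroduced API; it is not part of the vendored code, and it assumes only the names visible in the hunk (`NewBuddyInfo`, `NewFS`, `DefaultMountPoint`, the `Sizes []float64` field of `BuddyInfo`) plus the vendored import path shown in the diff header.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Convenience path: the package-level helper reads /proc/buddyinfo via
	// NewFS(DefaultMountPoint) and then FS.NewBuddyInfo(), as in the hunk below.
	infos, err := procfs.NewBuddyInfo()
	if err != nil {
		log.Fatalf("reading buddyinfo: %v", err)
	}
	for _, bi := range infos {
		// Sizes carries the per-order free-page counts for one buddyinfo line.
		fmt.Printf("%+v (orders: %d)\n", bi, len(bi.Sizes))
	}

	// Explicit path for a non-default proc mount point, using the FS method
	// that this hunk renames from BuddyInfo back to NewBuddyInfo.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fs.NewBuddyInfo(); err != nil {
		log.Fatal(err)
	}
}
```

The older API bakes the default mount point into the package-level wrapper, which is why the explicit `NewFS` form is kept in the sketch for hosts where /proc is mounted elsewhere.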
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index 63d4229a45..d3a8268078 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -31,9 +31,19 @@ type BuddyInfo struct { Sizes []float64 } +// NewBuddyInfo reads the buddyinfo statistics. +func NewBuddyInfo() ([]BuddyInfo, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewBuddyInfo() +} + // NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. -func (fs FS) BuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.proc.Path("buddyinfo")) +func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.Path("buddyinfo")) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 951d909af5..13c831ef59 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -1,48 +1,45 @@ # Archive created by ttar -c -f fixtures.ttar fixtures/ Directory: fixtures -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231 +Directory: fixtures/26231 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cmdline +Path: fixtures/26231/cmdline Lines: 1 vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/comm +Path: fixtures/26231/comm Lines: 1 vim Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cwd +Path: fixtures/26231/cwd SymlinkTo: /usr/bin # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/exe +Path: fixtures/26231/exe SymlinkTo: /usr/bin/vim # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fd +Directory: fixtures/26231/fd Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/0 +Path: fixtures/26231/fd/0 SymlinkTo: ../../symlinktargets/abc # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/1 +Path: fixtures/26231/fd/1 SymlinkTo: ../../symlinktargets/def # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/10 +Path: fixtures/26231/fd/10 SymlinkTo: ../../symlinktargets/xyz # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/2 +Path: fixtures/26231/fd/2 SymlinkTo: ../../symlinktargets/ghi # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/3 +Path: fixtures/26231/fd/3 SymlinkTo: ../../symlinktargets/uvw # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/io +Path: fixtures/26231/io Lines: 7 rchar: 750339 wchar: 818609 @@ -53,7 +50,7 @@ write_bytes: 2048 cancelled_write_bytes: -1024 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/26231/limits +Path: fixtures/26231/limits Lines: 17 Limit Soft Limit Hard Limit Units Max cpu time unlimited unlimited seconds @@ -74,14 +71,14 @@ Max realtime priority 0 0 Max realtime timeout unlimited unlimited us Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/mountstats -Lines: 20 +Path: fixtures/26231/mountstats +Lines: 19 device rootfs mounted on / with fstype rootfs device sysfs mounted on /sys with fstype sysfs device proc mounted on /proc with fstype proc device /dev/sda1 mounted on / with fstype ext4 device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none + opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none age: 13968 caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured @@ -94,14 +91,13 @@ device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers= NULL: 0 0 0 0 0 0 0 0 READ: 1298 1298 0 207680 1210292152 6 79386 79407 WRITE: 0 0 0 0 0 0 0 0 - ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/net +Directory: fixtures/26231/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/net/dev +Path: fixtures/26231/net/dev Lines: 4 Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed @@ -109,226 +105,142 @@ Inter-| Receive | Transmit eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/ns +Directory: fixtures/26231/ns Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/mnt +Path: fixtures/26231/ns/mnt SymlinkTo: mnt:[4026531840] # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/net +Path: fixtures/26231/ns/net SymlinkTo: net:[4026531993] # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/root +Path: fixtures/26231/root SymlinkTo: / # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/stat +Path: fixtures/26231/stat Lines: 1 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/status -Lines: 53 - -Name: prometheus -Umask: 0022 -State: S (sleeping) -Tgid: 1 -Ngid: 0 -Pid: 1 -PPid: 0 -TracerPid: 0 -Uid: 0 0 0 0 -Gid: 0 0 0 0 -FDSize: 128 -Groups: -NStgid: 1 -NSpid: 1 -NSpgid: 1 -NSsid: 1 
-VmPeak: 58472 kB -VmSize: 58440 kB -VmLck: 0 kB -VmPin: 0 kB -VmHWM: 8028 kB -VmRSS: 6716 kB -RssAnon: 2092 kB -RssFile: 4624 kB -RssShmem: 0 kB -VmData: 2580 kB -VmStk: 136 kB -VmExe: 948 kB -VmLib: 6816 kB -VmPTE: 128 kB -VmPMD: 12 kB -VmSwap: 660 kB -HugetlbPages: 0 kB -Threads: 1 -SigQ: 8/63965 -SigPnd: 0000000000000000 -ShdPnd: 0000000000000000 -SigBlk: 7be3c0fe28014a03 -SigIgn: 0000000000001000 -SigCgt: 00000001800004ec -CapInh: 0000000000000000 -CapPrm: 0000003fffffffff -CapEff: 0000003fffffffff -CapBnd: 0000003fffffffff -CapAmb: 0000000000000000 -Seccomp: 0 -Cpus_allowed: ff -Cpus_allowed_list: 0-7 -Mems_allowed: 00000000,00000001 -Mems_allowed_list: 0 -voluntary_ctxt_switches: 4742839 -nonvoluntary_ctxt_switches: 1727500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232 +Directory: fixtures/26232 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cmdline +Path: fixtures/26232/cmdline Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/comm +Path: fixtures/26232/comm Lines: 1 ata_sff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cwd +Path: fixtures/26232/cwd SymlinkTo: /does/not/exist # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232/fd +Directory: fixtures/26232/fd Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/0 +Path: fixtures/26232/fd/0 SymlinkTo: ../../symlinktargets/abc # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/1 +Path: fixtures/26232/fd/1 SymlinkTo: ../../symlinktargets/def # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/2 +Path: fixtures/26232/fd/2 SymlinkTo: ../../symlinktargets/ghi # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/3 +Path: fixtures/26232/fd/3 SymlinkTo: ../../symlinktargets/uvw # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/4 +Path: fixtures/26232/fd/4 SymlinkTo: ../../symlinktargets/xyz # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/limits +Path: fixtures/26232/limits Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/root +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file 
size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 29436 29436 processes +Max open files 1024 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 29436 29436 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/root SymlinkTo: /does/not/exist # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/stat +Path: fixtures/26232/stat Lines: 1 33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26233 +Directory: fixtures/26233 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/cmdline +Path: fixtures/26233/cmdline Lines: 1 com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/584 +Directory: fixtures/584 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/584/stat +Path: fixtures/584/stat Lines: 2 1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 #!/bin/cat /proc/self/stat Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/buddyinfo +Directory: fixtures/buddyinfo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/short +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/short/buddyinfo Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 +Node 0, zone +Node 0, zone +Node 0, zone Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/diskstats -Lines: 49 - 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 - 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 - 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 - 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 - 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 - 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 - 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 - 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 - 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 - 1 9 ram9 0 0 0 0 0 0 
0 0 0 0 0 - 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 - 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 - 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 - 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 - 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 - 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 - 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 - 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 - 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 - 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 - 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 - 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 - 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 - 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 - 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 - 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 - 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 - 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 - 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 - 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 - 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 - 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 - 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 - 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 - 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 - 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 - 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 - 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 - 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 - 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 - 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 - 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 - 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 - 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 - 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 - 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 - 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 - 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 - 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 -Mode: 664 +Directory: fixtures/buddyinfo/sizemismatch +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/sizemismatch/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/valid +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/valid/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs +Directory: fixtures/fs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/xfs +Directory: fixtures/fs/xfs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/xfs/stat +Path: fixtures/fs/xfs/stat Lines: 23 extent_alloc 92447 97589 92448 93751 abt 0 0 0 0 @@ -355,18 +267,18 @@ xpc 399724544 92823103 86219234 debug 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - -Path: fixtures/proc/mdstat +Path: fixtures/mdstat Lines: 26 Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - + md127 : active raid1 sdi2[0] sdj2[1] 312319552 blocks [2/2] [UU] - + md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] 248896 blocks [2/2] [UU] - + md4 : inactive raid1 sda3[0] sdb3[1] 4883648 blocks [2/2] [UU] @@ -385,10 +297,10 @@ md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] unused devices: Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net +Directory: fixtures/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/dev +Path: fixtures/net/dev Lines: 6 Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed @@ -398,7 +310,7 @@ docker0: 2568 38 0 0 0 0 0 0 438 eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs +Path: fixtures/net/ip_vs Lines: 21 IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags @@ -423,7 +335,7 @@ FWM 10001000 wlc -> C0A83215:0CEA Route 0 0 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs_stats +Path: fixtures/net/ip_vs_stats Lines: 6 Total Incoming Outgoing Incoming Outgoing Conns Packets Packets Bytes Bytes @@ -433,10 +345,10 @@ Lines: 6 4 1FB3C 0 1282A8F 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/rpc +Directory: fixtures/net/rpc Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfs +Path: fixtures/net/rpc/nfs Lines: 5 net 18628 0 18628 6 rpc 4329785 0 4338291 @@ -445,7 +357,7 @@ proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfsd +Path: fixtures/net/rpc/nfsd Lines: 11 rc 0 6 18622 fh 0 0 0 0 0 @@ -460,27 +372,7 @@ proc4 2 2 10853 proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix -Lines: 6 -Num RefCount Protocol Flags Type St Inode Path -0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 5091797 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix_without_inode -Lines: 6 -Num RefCount Protocol Flags Type St Path 
-0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/xfrm_stat +Path: fixtures/net/xfrm_stat Lines: 28 XfrmInError 1 XfrmInBufferError 2 @@ -512,30 +404,10 @@ XfrmOutStateInvalid 28765 XfrmAcquireError 24532 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/pressure -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/cpu -Lines: 1 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/io -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/memory -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/self +Path: fixtures/self SymlinkTo: 26231 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/stat +Path: fixtures/stat Lines: 16 cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 @@ -555,1254 +427,36 @@ procs_blocked 1 softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/symlinktargets +Directory: fixtures/symlinktargets Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/README +Path: fixtures/symlinktargets/README Lines: 2 This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
They are otherwise ignored by the tests Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/abc -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/def +Path: fixtures/symlinktargets/abc Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/ghi +Path: fixtures/symlinktargets/def Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/uvw -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/xyz -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/dm-0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/dm-0/stat -Lines: 1 -6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/stat -Lines: 1 -9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net/eth0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_assign_type -Lines: 1 -3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_len -Lines: 1 -6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/address -Lines: 1 -01:01:01:01:01:01 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/broadcast -Lines: 1 -ff:ff:ff:ff:ff:ff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_changes -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_down_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_up_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dev_id -Lines: 1 -0x20 -Mode: 644 -# ttar - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dormant -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/duplex -Lines: 1 -full -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/flags -Lines: 1 -0x1303 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifalias +Path: fixtures/symlinktargets/ghi Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifindex -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/iflink -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/link_mode -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/mtu -Lines: 1 -1500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/name_assign_type -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/netdev_group -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/operstate -Lines: 1 -up -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_id +Path: fixtures/symlinktargets/uvw Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_name +Path: fixtures/symlinktargets/xyz Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_switch_id +Path: fixtures/.unpacked Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/speed -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/tx_queue_len -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/type -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply/AC -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC/online -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC/type -Lines: 1 -Mains -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC/uevent -Lines: 2 -POWER_SUPPLY_NAME=AC -POWER_SUPPLY_ONLINE=0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply/BAT0 -Mode: 755 -# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/alarm -Lines: 1 -2503000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/capacity -Lines: 1 -98 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/capacity_level -Lines: 1 -Normal -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/charge_start_threshold -Lines: 1 -95 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/charge_stop_threshold -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/cycle_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_full -Lines: 1 -50060000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_full_design -Lines: 1 -47520000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_now -Lines: 1 -49450000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/manufacturer -Lines: 1 -LGC -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/model_name -Lines: 1 -LNV-45N1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/power_now -Lines: 1 -4830000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/present -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/serial_number -Lines: 1 -38109 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/status -Lines: 1 -Discharging -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/technology -Lines: 1 -Li-ion -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/type -Lines: 1 -Battery -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/uevent -Lines: 16 -POWER_SUPPLY_NAME=BAT0 -POWER_SUPPLY_STATUS=Discharging -POWER_SUPPLY_PRESENT=1 -POWER_SUPPLY_TECHNOLOGY=Li-ion -POWER_SUPPLY_CYCLE_COUNT=0 -POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 -POWER_SUPPLY_VOLTAGE_NOW=12229000 -POWER_SUPPLY_POWER_NOW=4830000 -POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 -POWER_SUPPLY_ENERGY_FULL=50060000 -POWER_SUPPLY_ENERGY_NOW=49450000 -POWER_SUPPLY_CAPACITY=98 -POWER_SUPPLY_CAPACITY_LEVEL=Normal -POWER_SUPPLY_MODEL_NAME=LNV-45N1 -POWER_SUPPLY_MANUFACTURER=LGC -POWER_SUPPLY_SERIAL_NUMBER=38109 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/power_supply/BAT0/voltage_min_design -Lines: 1 -10800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/voltage_now -Lines: 1 -12229000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/temp -Lines: 1 -49925 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/type -Lines: 1 -bcm2835_thermal -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/mode -Lines: 1 -enabled -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/passive -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/temp -Lines: 1 -44000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/type -Lines: 1 -acpitz Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 
20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/clocksource -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/clocksource/clocksource0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource -Lines: 1 -tsc hpet acpi_pm -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource -Lines: 1 -tsc -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq -SymlinkTo: ../cpufreq/policy0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq -Lines: 1 -1200195 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency -Lines: 1 -4294967295 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus -Lines: 1 -1 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors -Lines: 1 -performance powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver -Lines: 1 -intel_pstate -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor -Lines: 1 -powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed -Lines: 1 - -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq -Lines: 1 -2400000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq -Lines: 1 -800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors -Lines: 1 -performance powersave -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq -Lines: 1 -1219917 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver -Lines: 1 -intel_pstate -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor -Lines: 1 -powersave -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq -Lines: 1 -2400000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq -Lines: 1 -800000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed -Lines: 1 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions -Lines: 1 -0 
-Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us -Lines: 1 -1305 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sda1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sda1/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/xfs/sda1/stats/stats
-Lines: 1
-extent_alloc 1 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sdb1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sdb1/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/xfs/sdb1/stats/stats
-Lines: 1
-extent_alloc 2 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 0102ab0fd8..b6c6b2ce1f 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -14,30 +14,69 @@ package procfs
 import (
-	"github.com/prometheus/procfs/internal/fs"
+	"fmt"
+	"os"
+	"path"
+
+	"github.com/prometheus/procfs/nfs"
+	"github.com/prometheus/procfs/xfs"
 )
-// FS represents the pseudo-filesystem sys, which provides an interface to
+// FS represents the pseudo-filesystem proc, which provides an interface to
 // kernel data structures.
-type FS struct {
-	proc fs.FS
-}
+type FS string
 // DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = fs.DefaultProcMountPoint
+const DefaultMountPoint = "/proc"
-// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
-// It will error if the mount point directory can't be read or is a file.
-func NewDefaultFS() (FS, error) {
-	return NewFS(DefaultMountPoint)
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+	info, err := os.Stat(mountPoint)
+	if err != nil {
+		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+	}
+
+	return FS(mountPoint), nil
 }
-// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
-// if the mount point directory can't be read or is a file.
-func NewFS(mountPoint string) (FS, error) {
-	fs, err := fs.NewFS(mountPoint)
+// Path returns the path of the given subsystem relative to the procfs root.
+func (fs FS) Path(p ...string) string {
+	return path.Join(append([]string{string(fs)}, p...)...)
+}
+
+// XFSStats retrieves XFS filesystem runtime statistics.
+func (fs FS) XFSStats() (*xfs.Stats, error) {
+	f, err := os.Open(fs.Path("fs/xfs/stat"))
 	if err != nil {
-		return FS{}, err
+		return nil, err
 	}
-	return FS{fs}, nil
+	defer f.Close()
+
+	return xfs.ParseStats(f)
+}
+
+// NFSClientRPCStats retrieves NFS client RPC statistics.
+func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfs"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseClientRPCStats(f)
+}
+
+// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfsd"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseServerRPCStats(f)
 }
diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod
index 8a1b839fd2..e89ee6c90f 100644
--- a/vendor/github.com/prometheus/procfs/go.mod
+++ b/vendor/github.com/prometheus/procfs/go.mod
@@ -1,3 +1 @@
 module github.com/prometheus/procfs
-
-require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum
deleted file mode 100644
index 7827dd3d56..0000000000
--- a/vendor/github.com/prometheus/procfs/go.sum
+++ /dev/null
@@ -1,2 +0,0 @@
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
deleted file mode 100644
index c66a1cf80e..0000000000
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-)
-
-const (
-	// DefaultProcMountPoint is the common mount point of the proc filesystem.
-	DefaultProcMountPoint = "/proc"
-
-	// DefaultSysMountPoint is the common mount point of the sys filesystem.
-	DefaultSysMountPoint = "/sys"
-)
-
-// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
-// interface to kernel data structures.
-type FS string
-
-// NewFS returns a new FS mounted under the given mountPoint. It will error
-// if the mount point can't be read.
-func NewFS(mountPoint string) (FS, error) {
-	info, err := os.Stat(mountPoint)
-	if err != nil {
-		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
-	}
-	if !info.IsDir() {
-		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
-	}
-
-	return FS(mountPoint), nil
-}
-
-// Path appends the given path elements to the filesystem path, adding separators
-// as necessary.
-func (fs FS) Path(p ...string) string {
-	return filepath.Join(append([]string{string(fs)}, p...)...)
-} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 0000000000..2ff228e9d1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io/ioutil" + "strconv" + "strings" +) + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. +func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. +func ReadUintFromFile(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go new file mode 100644 index 0000000000..df0d567b78 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package util + +import ( + "bytes" + "os" + "syscall" +) + +// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +func SysReadFile(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. + // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. 
+ b := make([]byte, 128) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(b[:n])), nil +} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 2d6cb8d1c6..e36d4a3bd0 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -62,9 +62,19 @@ type IPVSBackendStatus struct { Weight uint64 } -// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) IPVSStats() (IPVSStats, error) { - file, err := os.Open(fs.proc.Path("net/ip_vs_stats")) +// NewIPVSStats reads the IPVS statistics. +func NewIPVSStats() (IPVSStats, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return IPVSStats{}, err + } + + return fs.NewIPVSStats() +} + +// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) NewIPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.Path("net/ip_vs_stats")) if err != nil { return IPVSStats{}, err } @@ -121,9 +131,19 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) { return stats, nil } -// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. -func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.proc.Path("net/ip_vs")) +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 71c1067825..9dc19583d8 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -42,64 +42,64 @@ type MDStat struct { BlocksSynced int64 } -// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. More information available here: -// https://raid.wiki.kernel.org/index.php/Mdstat -func (fs FS) MDStat() ([]MDStat, error) { - data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. +func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { + mdStatusFilePath := fs.Path("mdstat") + content, err := ioutil.ReadFile(mdStatusFilePath) if err != nil { - return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) + return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) } - mdstat, err := parseMDStat(data) - if err != nil { - return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) - } - return mdstat, nil -} -// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. 
-func parseMDStat(mdstatData []byte) ([]MDStat, error) { - mdStats := []MDStat{} - lines := strings.Split(string(mdstatData), "\n") + mdStates := []MDStat{} + lines := strings.Split(string(content), "\n") for i, l := range lines { - if strings.TrimSpace(l) == "" || l[0] == ' ' || - strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + if l == "" { + continue + } + if l[0] == ' ' { + continue + } + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { continue } - deviceFields := strings.Fields(l) - if len(deviceFields) < 3 { - return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", l) + mainLine := strings.Split(l, " ") + if len(mainLine) < 3 { + return mdStates, fmt.Errorf("error parsing mdline: %s", l) } - mdName := deviceFields[0] - activityState := deviceFields[2] + mdName := mainLine[0] + activityState := mainLine[2] if len(lines) <= i+3 { - return mdStats, fmt.Errorf("missing lines for md device %s", mdName) + return mdStates, fmt.Errorf( + "error parsing %s: too few lines for md device %s", + mdStatusFilePath, + mdName, + ) } - active, total, size, err := evalStatusLine(lines[i+1]) + active, total, size, err := evalStatusline(lines[i+1]) if err != nil { - return nil, err + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) } - syncLineIdx := i + 2 + // j is the line number of the syncing-line. + j := i + 2 if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - syncLineIdx++ + j = i + 3 } - // If device is recovering/syncing at the moment, get the number of currently + // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. syncedBlocks := size - if strings.Contains(lines[syncLineIdx], "recovery") || strings.Contains(lines[syncLineIdx], "resync") { - syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) if err != nil { - return nil, err + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) } } - mdStats = append(mdStats, MDStat{ + mdStates = append(mdStates, MDStat{ Name: mdName, ActivityState: activityState, DisksActive: active, @@ -109,10 +109,10 @@ func parseMDStat(mdstatData []byte) ([]MDStat, error) { }) } - return mdStats, nil + return mdStates, nil } -func evalStatusLine(statusline string) (active, total, size int64, err error) { +func evalStatusline(statusline string) (active, total, size int64, err error) { matches := statuslineRE.FindStringSubmatch(statusline) if len(matches) != 4 { return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) @@ -136,7 +136,7 @@ func evalStatusLine(statusline string) (active, total, size int64, err error) { return active, total, size, nil } -func evalRecoveryLine(buildline string) (syncedBlocks int64, err error) { +func evalBuildline(buildline string) (syncedBlocks int64, err error) { matches := buildlineRE.FindStringSubmatch(buildline) if len(matches) != 2 { return 0, fmt.Errorf("unexpected buildline: %s", buildline) diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 35b2ef3513..7a8a1e0990 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -69,8 +69,6 @@ type MountStats interface { type MountStatsNFS struct { // The version of statistics 
provided. StatVersion string - // The mount options of the NFS mount. - Opts map[string]string // The age of the NFS mount. Age time.Duration // Statistics related to byte counters for various operations. @@ -181,11 +179,11 @@ type NFSOperationStats struct { // Number of bytes received for this operation, including RPC headers and payload. BytesReceived uint64 // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueMilliseconds uint64 + CumulativeQueueTime time.Duration // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseMilliseconds uint64 + CumulativeTotalResponseTime time.Duration // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestMilliseconds uint64 + CumulativeTotalRequestTime time.Duration } // A NFSTransportStats contains statistics for the NFS mount RPC requests and @@ -204,7 +202,7 @@ type NFSTransportStats struct { // spent waiting for connections to the server to be established. ConnectIdleTime uint64 // Duration since the NFS mount last saw any RPC traffic. - IdleTimeSeconds uint64 + IdleTime time.Duration // Number of RPC requests for this mount sent to the NFS server. Sends uint64 // Number of RPC responses for this mount received from the NFS server. @@ -319,7 +317,6 @@ func parseMount(ss []string) (*Mount, error) { func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { // Field indicators for parsing specific types of data const ( - fieldOpts = "opts:" fieldAge = "age:" fieldBytes = "bytes:" fieldEvents = "events:" @@ -341,18 +338,6 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } switch ss[0] { - case fieldOpts: - if stats.Opts == nil { - stats.Opts = map[string]string{} - } - for _, opt := range strings.Split(ss[1], ",") { - split := strings.Split(opt, "=") - if len(split) == 2 { - stats.Opts[split[0]] = split[1] - } else { - stats.Opts[opt] = "" - } - } case fieldAge: // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") @@ -524,15 +509,15 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { } ops = append(ops, NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueMilliseconds: ns[5], - CumulativeTotalResponseMilliseconds: ns[6], - CumulativeTotalRequestMilliseconds: ns[7], + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, + CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, + CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, }) } @@ -608,7 +593,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats Bind: ns[1], Connect: ns[2], ConnectIdleTime: ns[3], - IdleTimeSeconds: ns[4], + IdleTime: time.Duration(ns[4]) * time.Second, Sends: ns[5], Receives: ns[6], BadTransactionIDs: ns[7], diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index a0b7a01196..3f2523371a 100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -47,13 +47,23 @@ type NetDevLine struct { // are interface names. 
type NetDev map[string]NetDevLine -// NetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NetDev() (NetDev, error) { - return newNetDev(fs.proc.Path("net/dev")) +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func NewNetDev() (NetDev, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetDev() +} + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NewNetDev() (NetDev, error) { + return newNetDev(fs.Path("net/dev")) } -// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NetDev() (NetDev, error) { +// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NewNetDev() (NetDev, error) { return newNetDev(p.path("net/dev")) } @@ -65,7 +75,7 @@ func newNetDev(file string) (NetDev, error) { } defer f.Close() - netDev := NetDev{} + nd := NetDev{} s := bufio.NewScanner(f) for n := 0; s.Scan(); n++ { // Skip the 2 header lines. @@ -73,20 +83,20 @@ func newNetDev(file string) (NetDev, error) { continue } - line, err := netDev.parseLine(s.Text()) + line, err := nd.parseLine(s.Text()) if err != nil { - return netDev, err + return nd, err } - netDev[line.Name] = *line + nd[line.Name] = *line } - return netDev, s.Err() + return nd, s.Err() } // parseLine parses a single line from the /proc/net/dev file. Header lines // must be filtered prior to calling this method. -func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { +func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { parts := strings.SplitN(rawLine, ":", 2) if len(parts) != 2 { return nil, errors.New("invalid net/dev line, missing colon") @@ -175,11 +185,11 @@ func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { // Total aggregates the values across interfaces and returns a new NetDevLine. // The Name field will be a sorted comma separated list of interface names. -func (netDev NetDev) Total() NetDevLine { +func (nd NetDev) Total() NetDevLine { total := NetDevLine{} - names := make([]string, 0, len(netDev)) - for _, ifc := range netDev { + names := make([]string, 0, len(nd)) + for _, ifc := range nd { names = append(names, ifc.Name) total.RxBytes += ifc.RxBytes total.RxPackets += ifc.RxPackets diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go deleted file mode 100644 index 240340a83a..0000000000 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// For the proc file format details, -// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 -// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. 
- -const ( - netUnixKernelPtrIdx = iota - netUnixRefCountIdx - _ - netUnixFlagsIdx - netUnixTypeIdx - netUnixStateIdx - netUnixInodeIdx - - // Inode and Path are optional. - netUnixStaticFieldsCnt = 6 -) - -const ( - netUnixTypeStream = 1 - netUnixTypeDgram = 2 - netUnixTypeSeqpacket = 5 - - netUnixFlagListen = 1 << 16 - - netUnixStateUnconnected = 1 - netUnixStateConnecting = 2 - netUnixStateConnected = 3 - netUnixStateDisconnected = 4 -) - -var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format") - -// NetUnixType is the type of the type field. -type NetUnixType uint64 - -// NetUnixFlags is the type of the flags field. -type NetUnixFlags uint64 - -// NetUnixState is the type of the state field. -type NetUnixState uint64 - -// NetUnixLine represents a line of /proc/net/unix. -type NetUnixLine struct { - KernelPtr string - RefCount uint64 - Protocol uint64 - Flags NetUnixFlags - Type NetUnixType - State NetUnixState - Inode uint64 - Path string -} - -// NetUnix holds the data read from /proc/net/unix. -type NetUnix struct { - Rows []*NetUnixLine -} - -// NewNetUnix returns data read from /proc/net/unix. -func NewNetUnix() (*NetUnix, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewNetUnix() -} - -// NewNetUnix returns data read from /proc/net/unix. -func (fs FS) NewNetUnix() (*NetUnix, error) { - return NewNetUnixByPath(fs.proc.Path("net/unix")) -} - -// NewNetUnixByPath returns data read from /proc/net/unix by file path. -// It might returns an error with partial parsed data, if an error occur after some data parsed. -func NewNetUnixByPath(path string) (*NetUnix, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - return NewNetUnixByReader(f) -} - -// NewNetUnixByReader returns data read from /proc/net/unix by a reader. -// It might returns an error with partial parsed data, if an error occur after some data parsed. -func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) { - nu := &NetUnix{ - Rows: make([]*NetUnixLine, 0, 32), - } - scanner := bufio.NewScanner(reader) - // Omit the header line. - scanner.Scan() - header := scanner.Text() - // From the man page of proc(5), it does not contain an Inode field, - // but in actually it exists. - // This code works for both cases. 
- hasInode := strings.Contains(header, "Inode") - - minFieldsCnt := netUnixStaticFieldsCnt - if hasInode { - minFieldsCnt++ - } - for scanner.Scan() { - line := scanner.Text() - item, err := nu.parseLine(line, hasInode, minFieldsCnt) - if err != nil { - return nu, err - } - nu.Rows = append(nu.Rows, item) - } - - return nu, scanner.Err() -} - -func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) { - fields := strings.Fields(line) - fieldsLen := len(fields) - if fieldsLen < minFieldsCnt { - return nil, fmt.Errorf( - "Parse Unix domain failed: expect at least %d fields but got %d", - minFieldsCnt, fieldsLen) - } - kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx]) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err) - } - users, err := u.parseUsers(fields[netUnixRefCountIdx]) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err) - } - flags, err := u.parseFlags(fields[netUnixFlagsIdx]) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err) - } - typ, err := u.parseType(fields[netUnixTypeIdx]) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err) - } - state, err := u.parseState(fields[netUnixStateIdx]) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err) - } - var inode uint64 - if hasInode { - inodeStr := fields[netUnixInodeIdx] - inode, err = u.parseInode(inodeStr) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err) - } - } - - nuLine := &NetUnixLine{ - KernelPtr: kernelPtr, - RefCount: users, - Type: typ, - Flags: flags, - State: state, - Inode: inode, - } - - // Path field is optional. 
- if fieldsLen > minFieldsCnt { - pathIdx := netUnixInodeIdx + 1 - if !hasInode { - pathIdx-- - } - nuLine.Path = fields[pathIdx] - } - - return nuLine, nil -} - -func (u NetUnix) parseKernelPtr(str string) (string, error) { - if !strings.HasSuffix(str, ":") { - return "", errInvalidKernelPtrFmt - } - return str[:len(str)-1], nil -} - -func (u NetUnix) parseUsers(hexStr string) (uint64, error) { - return strconv.ParseUint(hexStr, 16, 32) -} - -func (u NetUnix) parseProtocol(hexStr string) (uint64, error) { - return strconv.ParseUint(hexStr, 16, 32) -} - -func (u NetUnix) parseType(hexStr string) (NetUnixType, error) { - typ, err := strconv.ParseUint(hexStr, 16, 16) - if err != nil { - return 0, err - } - return NetUnixType(typ), nil -} - -func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) { - flags, err := strconv.ParseUint(hexStr, 16, 32) - if err != nil { - return 0, err - } - return NetUnixFlags(flags), nil -} - -func (u NetUnix) parseState(hexStr string) (NetUnixState, error) { - st, err := strconv.ParseInt(hexStr, 16, 8) - if err != nil { - return 0, err - } - return NetUnixState(st), nil -} - -func (u NetUnix) parseInode(inodeStr string) (uint64, error) { - return strconv.ParseUint(inodeStr, 10, 64) -} - -func (t NetUnixType) String() string { - switch t { - case netUnixTypeStream: - return "stream" - case netUnixTypeDgram: - return "dgram" - case netUnixTypeSeqpacket: - return "seqpacket" - } - return "unknown" -} - -func (f NetUnixFlags) String() string { - switch f { - case netUnixFlagListen: - return "listen" - default: - return "default" - } -} - -func (s NetUnixState) String() string { - switch s { - case netUnixStateUnconnected: - return "unconnected" - case netUnixStateConnecting: - return "connecting" - case netUnixStateConnected: - return "connected" - case netUnixStateDisconnected: - return "disconnected" - } - return "unknown" -} diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go new file mode 100644 index 0000000000..651bf68195 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go @@ -0,0 +1,263 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package nfs implements parsing of /proc/net/rpc/nfsd. +// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ +package nfs + +// ReplyCache models the "rc" line. +type ReplyCache struct { + Hits uint64 + Misses uint64 + NoCache uint64 +} + +// FileHandles models the "fh" line. +type FileHandles struct { + Stale uint64 + TotalLookups uint64 + AnonLookups uint64 + DirNoCache uint64 + NoDirNoCache uint64 +} + +// InputOutput models the "io" line. +type InputOutput struct { + Read uint64 + Write uint64 +} + +// Threads models the "th" line. +type Threads struct { + Threads uint64 + FullCnt uint64 +} + +// ReadAheadCache models the "ra" line. 
+type ReadAheadCache struct { + CacheSize uint64 + CacheHistogram []uint64 + NotFound uint64 +} + +// Network models the "net" line. +type Network struct { + NetCount uint64 + UDPCount uint64 + TCPCount uint64 + TCPConnect uint64 +} + +// ClientRPC models the nfs "rpc" line. +type ClientRPC struct { + RPCCount uint64 + Retransmissions uint64 + AuthRefreshes uint64 +} + +// ServerRPC models the nfsd "rpc" line. +type ServerRPC struct { + RPCCount uint64 + BadCnt uint64 + BadFmt uint64 + BadAuth uint64 + BadcInt uint64 +} + +// V2Stats models the "proc2" line. +type V2Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Root uint64 + Lookup uint64 + ReadLink uint64 + Read uint64 + WrCache uint64 + Write uint64 + Create uint64 + Remove uint64 + Rename uint64 + Link uint64 + SymLink uint64 + MkDir uint64 + RmDir uint64 + ReadDir uint64 + FsStat uint64 +} + +// V3Stats models the "proc3" line. +type V3Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Lookup uint64 + Access uint64 + ReadLink uint64 + Read uint64 + Write uint64 + Create uint64 + MkDir uint64 + SymLink uint64 + MkNod uint64 + Remove uint64 + RmDir uint64 + Rename uint64 + Link uint64 + ReadDir uint64 + ReadDirPlus uint64 + FsStat uint64 + FsInfo uint64 + PathConf uint64 + Commit uint64 +} + +// ClientV4Stats models the nfs "proc4" line. +type ClientV4Stats struct { + Null uint64 + Read uint64 + Write uint64 + Commit uint64 + Open uint64 + OpenConfirm uint64 + OpenNoattr uint64 + OpenDowngrade uint64 + Close uint64 + Setattr uint64 + FsInfo uint64 + Renew uint64 + SetClientID uint64 + SetClientIDConfirm uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Access uint64 + Getattr uint64 + Lookup uint64 + LookupRoot uint64 + Remove uint64 + Rename uint64 + Link uint64 + Symlink uint64 + Create uint64 + Pathconf uint64 + StatFs uint64 + ReadLink uint64 + ReadDir uint64 + ServerCaps uint64 + DelegReturn uint64 + GetACL uint64 + SetACL uint64 + FsLocations uint64 + ReleaseLockowner uint64 + Secinfo uint64 + FsidPresent uint64 + ExchangeID uint64 + CreateSession uint64 + DestroySession uint64 + Sequence uint64 + GetLeaseTime uint64 + ReclaimComplete uint64 + LayoutGet uint64 + GetDeviceInfo uint64 + LayoutCommit uint64 + LayoutReturn uint64 + SecinfoNoName uint64 + TestStateID uint64 + FreeStateID uint64 + GetDeviceList uint64 + BindConnToSession uint64 + DestroyClientID uint64 + Seek uint64 + Allocate uint64 + DeAllocate uint64 + LayoutStats uint64 + Clone uint64 +} + +// ServerV4Stats models the nfsd "proc4" line. +type ServerV4Stats struct { + Null uint64 + Compound uint64 +} + +// V4Ops models the "proc4ops" line: NFSv4 operations +// Variable list, see: +// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) +// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) +// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) +type V4Ops struct { + //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? 
+ Op0Unused uint64 + Op1Unused uint64 + Op2Future uint64 + Access uint64 + Close uint64 + Commit uint64 + Create uint64 + DelegPurge uint64 + DelegReturn uint64 + GetAttr uint64 + GetFH uint64 + Link uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Lookup uint64 + LookupRoot uint64 + Nverify uint64 + Open uint64 + OpenAttr uint64 + OpenConfirm uint64 + OpenDgrd uint64 + PutFH uint64 + PutPubFH uint64 + PutRootFH uint64 + Read uint64 + ReadDir uint64 + ReadLink uint64 + Remove uint64 + Rename uint64 + Renew uint64 + RestoreFH uint64 + SaveFH uint64 + SecInfo uint64 + SetAttr uint64 + Verify uint64 + Write uint64 + RelLockOwner uint64 +} + +// ClientRPCStats models all stats from /proc/net/rpc/nfs. +type ClientRPCStats struct { + Network Network + ClientRPC ClientRPC + V2Stats V2Stats + V3Stats V3Stats + ClientV4Stats ClientV4Stats +} + +// ServerRPCStats models all stats from /proc/net/rpc/nfsd. +type ServerRPCStats struct { + ReplyCache ReplyCache + FileHandles FileHandles + InputOutput InputOutput + Threads Threads + ReadAheadCache ReadAheadCache + Network Network + ServerRPC ServerRPC + V2Stats V2Stats + V3Stats V3Stats + ServerV4Stats ServerV4Stats + V4Ops V4Ops +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go new file mode 100644 index 0000000000..95a83cc5bc --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nfs + +import ( + "fmt" +) + +func parseReplyCache(v []uint64) (ReplyCache, error) { + if len(v) != 3 { + return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) + } + + return ReplyCache{ + Hits: v[0], + Misses: v[1], + NoCache: v[2], + }, nil +} + +func parseFileHandles(v []uint64) (FileHandles, error) { + if len(v) != 5 { + return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) + } + + return FileHandles{ + Stale: v[0], + TotalLookups: v[1], + AnonLookups: v[2], + DirNoCache: v[3], + NoDirNoCache: v[4], + }, nil +} + +func parseInputOutput(v []uint64) (InputOutput, error) { + if len(v) != 2 { + return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) + } + + return InputOutput{ + Read: v[0], + Write: v[1], + }, nil +} + +func parseThreads(v []uint64) (Threads, error) { + if len(v) != 2 { + return Threads{}, fmt.Errorf("invalid Threads line %q", v) + } + + return Threads{ + Threads: v[0], + FullCnt: v[1], + }, nil +} + +func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { + if len(v) != 12 { + return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) + } + + return ReadAheadCache{ + CacheSize: v[0], + CacheHistogram: v[1:11], + NotFound: v[11], + }, nil +} + +func parseNetwork(v []uint64) (Network, error) { + if len(v) != 4 { + return Network{}, fmt.Errorf("invalid Network line %q", v) + } + + return Network{ + NetCount: v[0], + UDPCount: v[1], + TCPCount: v[2], + TCPConnect: v[3], + }, nil +} + +func parseServerRPC(v []uint64) (ServerRPC, error) { + if len(v) != 5 { + return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ServerRPC{ + RPCCount: v[0], + BadCnt: v[1], + BadFmt: v[2], + BadAuth: v[3], + BadcInt: v[4], + }, nil +} + +func parseClientRPC(v []uint64) (ClientRPC, error) { + if len(v) != 3 { + return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ClientRPC{ + RPCCount: v[0], + Retransmissions: v[1], + AuthRefreshes: v[2], + }, nil +} + +func parseV2Stats(v []uint64) (V2Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 18 { + return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) + } + + return V2Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Root: v[4], + Lookup: v[5], + ReadLink: v[6], + Read: v[7], + WrCache: v[8], + Write: v[9], + Create: v[10], + Remove: v[11], + Rename: v[12], + Link: v[13], + SymLink: v[14], + MkDir: v[15], + RmDir: v[16], + ReadDir: v[17], + FsStat: v[18], + }, nil +} + +func parseV3Stats(v []uint64) (V3Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 22 { + return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) + } + + return V3Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Lookup: v[4], + Access: v[5], + ReadLink: v[6], + Read: v[7], + Write: v[8], + Create: v[9], + MkDir: v[10], + SymLink: v[11], + MkNod: v[12], + Remove: v[13], + RmDir: v[14], + Rename: v[15], + Link: v[16], + ReadDir: v[17], + ReadDirPlus: v[18], + FsStat: v[19], + FsInfo: v[20], + PathConf: v[21], + Commit: v[22], + }, nil +} + +func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values { + return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) + } + + // This function currently supports mapping 59 NFS v4 client stats. Older + // kernels may emit fewer stats, so we must detect this and pad out the + // values to match the expected slice size. 
+ if values < 59 { + newValues := make([]uint64, 60) + copy(newValues, v) + v = newValues + } + + return ClientV4Stats{ + Null: v[1], + Read: v[2], + Write: v[3], + Commit: v[4], + Open: v[5], + OpenConfirm: v[6], + OpenNoattr: v[7], + OpenDowngrade: v[8], + Close: v[9], + Setattr: v[10], + FsInfo: v[11], + Renew: v[12], + SetClientID: v[13], + SetClientIDConfirm: v[14], + Lock: v[15], + Lockt: v[16], + Locku: v[17], + Access: v[18], + Getattr: v[19], + Lookup: v[20], + LookupRoot: v[21], + Remove: v[22], + Rename: v[23], + Link: v[24], + Symlink: v[25], + Create: v[26], + Pathconf: v[27], + StatFs: v[28], + ReadLink: v[29], + ReadDir: v[30], + ServerCaps: v[31], + DelegReturn: v[32], + GetACL: v[33], + SetACL: v[34], + FsLocations: v[35], + ReleaseLockowner: v[36], + Secinfo: v[37], + FsidPresent: v[38], + ExchangeID: v[39], + CreateSession: v[40], + DestroySession: v[41], + Sequence: v[42], + GetLeaseTime: v[43], + ReclaimComplete: v[44], + LayoutGet: v[45], + GetDeviceInfo: v[46], + LayoutCommit: v[47], + LayoutReturn: v[48], + SecinfoNoName: v[49], + TestStateID: v[50], + FreeStateID: v[51], + GetDeviceList: v[52], + BindConnToSession: v[53], + DestroyClientID: v[54], + Seek: v[55], + Allocate: v[56], + DeAllocate: v[57], + LayoutStats: v[58], + Clone: v[59], + }, nil +} + +func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 2 { + return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ServerV4Stats{ + Null: v[1], + Compound: v[2], + }, nil +} + +func parseV4Ops(v []uint64) (V4Ops, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 39 { + return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) + } + + stats := V4Ops{ + Op0Unused: v[1], + Op1Unused: v[2], + Op2Future: v[3], + Access: v[4], + Close: v[5], + Commit: v[6], + Create: v[7], + DelegPurge: v[8], + DelegReturn: v[9], + GetAttr: v[10], + GetFH: v[11], + Link: v[12], + Lock: v[13], + Lockt: v[14], + Locku: v[15], + Lookup: v[16], + LookupRoot: v[17], + Nverify: v[18], + Open: v[19], + OpenAttr: v[20], + OpenConfirm: v[21], + OpenDgrd: v[22], + PutFH: v[23], + PutPubFH: v[24], + PutRootFH: v[25], + Read: v[26], + ReadDir: v[27], + ReadLink: v[28], + Remove: v[29], + Rename: v[30], + Renew: v[31], + RestoreFH: v[32], + SaveFH: v[33], + SecInfo: v[34], + SetAttr: v[35], + Verify: v[36], + Write: v[37], + RelLockOwner: v[38], + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go new file mode 100644 index 0000000000..c0d3a5ad9b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs +func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { + stats := &ClientRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFS metric line %q", line) + } + + values, err := util.ParseUint64s(parts[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing NFS metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ClientRPC, err = parseClientRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ClientV4Stats, err = parseClientV4Stats(values) + default: + return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFS file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go new file mode 100644 index 0000000000..57bb4a3585 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd +func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { + stats := &ServerRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFSd metric line %q", line) + } + label := parts[0] + + var values []uint64 + var err error + if label == "th" { + if len(parts) < 3 { + return nil, fmt.Errorf("invalid NFSd th metric line %q", line) + } + values, err = util.ParseUint64s(parts[1:3]) + } else { + values, err = util.ParseUint64s(parts[1:]) + } + if err != nil { + return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "rc": + stats.ReplyCache, err = parseReplyCache(values) + case "fh": + stats.FileHandles, err = parseFileHandles(values) + case "io": + stats.InputOutput, err = parseInputOutput(values) + case "th": + stats.Threads, err = parseThreads(values) + case "ra": + stats.ReadAheadCache, err = parseReadAheadCache(values) + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ServerRPC, err = parseServerRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ServerV4Stats, err = parseServerV4Stats(values) + case "proc4ops": + stats.V4Ops, err = parseV4Ops(values) + default: + return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFSd file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 8a8430147e..06bed0ef4a 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -20,8 +20,6 @@ import ( "os" "strconv" "strings" - - "github.com/prometheus/procfs/internal/fs" ) // Proc provides information about a running process. @@ -29,7 +27,7 @@ type Proc struct { // The process ID. PID int - fs fs.FS + fs FS } // Procs represents a list of Proc structs. @@ -54,7 +52,7 @@ func NewProc(pid int) (Proc, error) { if err != nil { return Proc{}, err } - return fs.Proc(pid) + return fs.NewProc(pid) } // AllProcs returns a list of all currently available processes under /proc. @@ -68,35 +66,28 @@ func AllProcs() (Procs, error) { // Self returns a process for the current process. func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.proc.Path("self")) + p, err := os.Readlink(fs.Path("self")) if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) if err != nil { return Proc{}, err } - return fs.Proc(pid) + return fs.NewProc(pid) } // NewProc returns a process for the given pid. -// -// Deprecated: use fs.Proc() instead func (fs FS) NewProc(pid int) (Proc, error) { - return fs.Proc(pid) -} - -// Proc returns a process for the given pid. 
-func (fs FS) Proc(pid int) (Proc, error) { - if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } - return Proc{PID: pid, fs: fs.proc}, nil + return Proc{PID: pid, fs: fs}, nil } // AllProcs returns a list of all currently available processes. func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.proc.Path()) + d, err := os.Open(fs.Path()) if err != nil { return Procs{}, err } @@ -113,7 +104,7 @@ func (fs FS) AllProcs() (Procs, error) { if err != nil { continue } - p = append(p, Proc{PID: int(pid), fs: fs.proc}) + p = append(p, Proc{PID: int(pid), fs: fs}) } return p, nil diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index 0ff89b1cef..0251c83bfe 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -39,8 +39,8 @@ type ProcIO struct { CancelledWriteBytes int64 } -// IO creates a new ProcIO instance from a given Proc instance. -func (p Proc) IO() (ProcIO, error) { +// NewIO creates a new ProcIO instance from a given Proc instance. +func (p Proc) NewIO() (ProcIO, error) { pio := ProcIO{} f, err := os.Open(p.path("io")) diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index 91ee24df8b..f04ba6fda8 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -78,14 +78,7 @@ var ( ) // NewLimits returns the current soft limits of the process. -// -// Deprecated: use p.Limits() instead func (p Proc) NewLimits() (ProcLimits, error) { - return p.Limits() -} - -// Limits returns the current soft limits of the process. -func (p Proc) Limits() (ProcLimits, error) { f, err := os.Open(p.path("limits")) if err != nil { return ProcLimits{}, err diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index c66740ff74..d06c26ebad 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -29,9 +29,9 @@ type Namespace struct { // Namespaces contains all of the namespaces that the process is contained in. type Namespaces map[string]Namespace -// Namespaces reads from /proc//ns/* to get the namespaces of which the +// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the // process is a member. -func (p Proc) Namespaces() (Namespaces, error) { +func (p Proc) NewNamespaces() (Namespaces, error) { d, err := os.Open(p.path("ns")) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go deleted file mode 100644 index 46fe266263..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// The PSI / pressure interface is described at -// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt -// Each resource (cpu, io, memory, ...) is exposed as a single file. -// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. -// Each line contains several averages (over n seconds) and a total in µs. -// -// Example io pressure file: -// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 -// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "strings" -) - -const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" - -// PSILine is a single line of values as returned by /proc/pressure/* -// The Avg entries are averages over n seconds, as a percentage -// The Total line is in microseconds -type PSILine struct { - Avg10 float64 - Avg60 float64 - Avg300 float64 - Total uint64 -} - -// PSIStats represent pressure stall information from /proc/pressure/* -// Some indicates the share of time in which at least some tasks are stalled -// Full indicates the share of time in which all non-idle tasks are stalled simultaneously -type PSIStats struct { - Some *PSILine - Full *PSILine -} - -// PSIStatsForResource reads pressure stall information for the specified -// resource from /proc/pressure/. At time of writing this can be -// either "cpu", "memory" or "io". -func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { - file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) - if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) - } - - defer file.Close() - return parsePSIStats(resource, file) -} - -// parsePSIStats parses the specified file for pressure stall information -func parsePSIStats(resource string, file io.Reader) (PSIStats, error) { - psiStats := PSIStats{} - stats, err := ioutil.ReadAll(file) - if err != nil { - return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource) - } - - for _, l := range strings.Split(string(stats), "\n") { - prefix := strings.Split(l, " ")[0] - switch prefix { - case "some": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Some = &psi - case "full": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Full = &psi - default: - // If we encounter a line with an unknown prefix, ignore it and move on - // Should new measurement types be added in the future we'll simply ignore them instead - // of erroring on retrieval - continue - } - } - - return psiStats, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 6ed98a8ae3..3cf2a9f18f 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -18,8 +18,6 @@ import ( "fmt" "io/ioutil" "os" - - "github.com/prometheus/procfs/internal/fs" ) // Originally, this USER_HZ value was dynamically retrieved via a sysconf call @@ -97,22 +95,15 @@ type ProcStat struct { // in clock ticks. Starttime uint64 // Virtual memory size in bytes. 
- VSize uint + VSize int // Resident set size in pages. RSS int - proc fs.FS + fs FS } // NewStat returns the current status information of the process. -// -// Deprecated: use NewStat() instead func (p Proc) NewStat() (ProcStat, error) { - return p.Stat() -} - -// Stat returns the current status information of the process. -func (p Proc) Stat() (ProcStat, error) { f, err := os.Open(p.path("stat")) if err != nil { return ProcStat{}, err @@ -127,7 +118,7 @@ func (p Proc) Stat() (ProcStat, error) { var ( ignore int - s = ProcStat{PID: p.PID, proc: p.fs} + s = ProcStat{PID: p.PID, fs: p.fs} l = bytes.Index(data, []byte("(")) r = bytes.LastIndex(data, []byte(")")) ) @@ -173,7 +164,7 @@ func (p Proc) Stat() (ProcStat, error) { } // VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() uint { +func (s ProcStat) VirtualMemory() int { return s.VSize } @@ -184,8 +175,7 @@ func (s ProcStat) ResidentMemory() int { // StartTime returns the unix timestamp of the process in seconds. func (s ProcStat) StartTime() (float64, error) { - fs := FS{proc: s.proc} - stat, err := fs.Stat() + stat, err := s.fs.NewStat() if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go deleted file mode 100644 index 6b4b61f71c..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "io/ioutil" - "os" - "strconv" - "strings" -) - -// ProcStat provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStatus struct { - // The process ID. - PID int - // The process name. - Name string - - // Peak virtual memory size. - VmPeak uint64 - // Virtual memory size. - VmSize uint64 - // Locked memory size. - VmLck uint64 - // Pinned memory size. - VmPin uint64 - // Peak resident set size. - VmHWM uint64 - // Resident set size (sum of RssAnnon RssFile and RssShmem). - VmRSS uint64 - // Size of resident anonymous memory. - RssAnon uint64 - // Size of resident file mappings. - RssFile uint64 - // Size of resident shared memory. - RssShmem uint64 - // Size of data segments. - VmData uint64 - // Size of stack segments. - VmStk uint64 - // Size of text segments. - VmExe uint64 - // Shared library code size. - VmLib uint64 - // Page table entries size. - VmPTE uint64 - // Size of second-level page tables. - VmPMD uint64 - // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 - // Size of hugetlb memory portions - HugetlbPages uint64 - - // Number of voluntary context switches. - VoluntaryCtxtSwitches uint64 - // Number of involuntary context switches. - NonVoluntaryCtxtSwitches uint64 -} - -// NewStatus returns the current status information of the process. 
-func (p Proc) NewStatus() (ProcStatus, error) { - f, err := os.Open(p.path("status")) - if err != nil { - return ProcStatus{}, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return ProcStatus{}, err - } - - s := ProcStatus{PID: p.PID} - - lines := strings.Split(string(data), "\n") - for _, line := range lines { - if !bytes.Contains([]byte(line), []byte(":")) { - continue - } - - kv := strings.SplitN(line, ":", 2) - - // removes spaces - k := string(strings.TrimSpace(kv[0])) - v := string(strings.TrimSpace(kv[1])) - // removes "kB" - v = string(bytes.Trim([]byte(v), " kB")) - - // value to int when possible - // we can skip error check here, 'cause vKBytes is not used when value is a string - vKBytes, _ := strconv.ParseUint(v, 10, 64) - // convert kB to B - vBytes := vKBytes * 1024 - - s.fillStatus(k, v, vKBytes, vBytes) - } - - return s, nil -} - -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { - switch k { - case "Name": - s.Name = vString - case "VmPeak": - s.VmPeak = vUintBytes - case "VmSize": - s.VmSize = vUintBytes - case "VmLck": - s.VmLck = vUintBytes - case "VmPin": - s.VmPin = vUintBytes - case "VmHWM": - s.VmHWM = vUintBytes - case "VmRSS": - s.VmRSS = vUintBytes - case "RssAnon": - s.RssAnon = vUintBytes - case "RssFile": - s.RssFile = vUintBytes - case "RssShmem": - s.RssShmem = vUintBytes - case "VmData": - s.VmData = vUintBytes - case "VmStk": - s.VmStk = vUintBytes - case "VmExe": - s.VmExe = vUintBytes - case "VmLib": - s.VmLib = vUintBytes - case "VmPTE": - s.VmPTE = vUintBytes - case "VmPMD": - s.VmPMD = vUintBytes - case "VmSwap": - s.VmSwap = vUintBytes - case "HugetlbPages": - s.HugetlbPages = vUintBytes - case "voluntary_ctxt_switches": - s.VoluntaryCtxtSwitches = vUint - case "nonvoluntary_ctxt_switches": - s.NonVoluntaryCtxtSwitches = vUint - } -} - -// TotalCtxtSwitches returns the total context switch. -func (s ProcStatus) TotalCtxtSwitches() uint64 { - return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches -} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 6661ee03a6..61eb6b0e3c 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -20,8 +20,6 @@ import ( "os" "strconv" "strings" - - "github.com/prometheus/procfs/internal/fs" ) // CPUStat shows how much time the cpu spend in various stages. @@ -80,6 +78,16 @@ type Stat struct { SoftIRQ SoftIRQStat } +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Stat{}, err + } + + return fs.NewStat() +} + // Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). func parseCPUStat(line string) (CPUStat, int64, error) { cpuStat := CPUStat{} @@ -141,31 +149,11 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { return softIRQStat, total, nil } -// NewStat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: use fs.Stat() instead -func NewStat() (Stat, error) { - fs, err := NewFS(fs.DefaultProcMountPoint) - if err != nil { - return Stat{}, err - } - return fs.Stat() -} - -// NewStat returns information about current cpu/process statistics. 
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: use fs.Stat() instead +// NewStat returns an information about current kernel/system statistics. func (fs FS) NewStat() (Stat, error) { - return fs.Stat() -} - -// Stat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) Stat() (Stat, error) { + // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt - f, err := os.Open(fs.proc.Path("stat")) + f, err := os.Open(fs.Path("stat")) if err != nil { return Stat{}, err } diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar index 19ef02b8d4..b0171a12b5 100644 --- a/vendor/github.com/prometheus/procfs/ttar +++ b/vendor/github.com/prometheus/procfs/ttar @@ -86,10 +86,8 @@ Usage: $bname [-C ] -c -f (create archive) $bname [-C ] -x -f (extract archive) Options: - -C (change directory) - -v (verbose) - --recursive-unlink (recursively delete existing directory if path - collides with file or directory to extract) + -C (change directory) + -v (verbose) Example: Change to sysfs directory, create ttar file from fixtures directory $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ @@ -113,9 +111,8 @@ function set_cmd { } unset VERBOSE -unset RECURSIVE_UNLINK -while getopts :cf:-:htxvC: opt; do +while getopts :cf:htxvC: opt; do case $opt in c) set_cmd "create" @@ -139,18 +136,6 @@ while getopts :cf:-:htxvC: opt; do C) CDIR=$OPTARG ;; - -) - case $OPTARG in - recursive-unlink) - RECURSIVE_UNLINK="yes" - ;; - *) - echo -e "Error: invalid option -$OPTARG" - echo - usage 1 - ;; - esac - ;; *) echo >&2 "ERROR: invalid option -$OPTARG" echo @@ -227,16 +212,16 @@ function extract { local eof_without_newline if [ "$size" -gt 0 ]; then if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceded by a backslash indicates that the line + # An EOF not preceeded by a backslash indicates that the line # does not end with a newline eof_without_newline=1 else eof_without_newline=0 fi # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceded by backslash + # Replace NULLBYTE with null byte unless preceeded by backslash # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceded by backslash + # Remove EOF unless preceeded by backslash # Remove one backslash in front of EOF if [ $USE_PYTHON -eq 1 ]; then echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" @@ -260,16 +245,7 @@ function extract { fi if [[ $line =~ ^Path:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} - if [ -L "$path" ]; then - rm "$path" - elif [ -d "$path" ]; then - if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then - rm -r "$path" - else - # Safe because symlinks to directories are dealt with above - rmdir "$path" - fi - elif [ -e "$path" ]; then + if [ -e "$path" ] || [ -L "$path" ]; then rm "$path" fi elif [[ $line =~ ^Lines:\ (.*)$ ]]; then @@ -362,8 +338,8 @@ function _create { else < "$file" \ sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; ' fi if [[ "$eof_without_newline" -eq 1 ]]; then diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go index 30aa417d53..8f1508f0fd 100644 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -97,7 +97,7 @@ func NewXfrmStat() (XfrmStat, error) { // NewXfrmStat 
reads the xfrm_stat statistics from the 'proc' filesystem. func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.proc.Path("net/xfrm_stat")) + file, err := os.Open(fs.Path("net/xfrm_stat")) if err != nil { return XfrmStat{}, err } diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go new file mode 100644 index 0000000000..b3d8634df3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/parse.go @@ -0,0 +1,330 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseStats parses a Stats from an input io.Reader, using the format +// found in /proc/fs/xfs/stat. +func ParseStats(r io.Reader) (*Stats, error) { + const ( + // Fields parsed into stats structures. + fieldExtentAlloc = "extent_alloc" + fieldAbt = "abt" + fieldBlkMap = "blk_map" + fieldBmbt = "bmbt" + fieldDir = "dir" + fieldTrans = "trans" + fieldIg = "ig" + fieldLog = "log" + fieldRw = "rw" + fieldAttr = "attr" + fieldIcluster = "icluster" + fieldVnodes = "vnodes" + fieldBuf = "buf" + fieldXpc = "xpc" + + // Unimplemented at this time due to lack of documentation. + // fieldPushAil = "push_ail" + // fieldXstrat = "xstrat" + // fieldAbtb2 = "abtb2" + // fieldAbtc2 = "abtc2" + // fieldBmbt2 = "bmbt2" + // fieldIbt2 = "ibt2" + // fieldFibt2 = "fibt2" + // fieldQm = "qm" + // fieldDebug = "debug" + ) + + var xfss Stats + + s := bufio.NewScanner(r) + for s.Scan() { + // Expect at least a string label and a single integer value, ex: + // - abt 0 + // - rw 1 2 + ss := strings.Fields(string(s.Bytes())) + if len(ss) < 2 { + continue + } + label := ss[0] + + // Extended precision counters are uint64 values. + if label == fieldXpc { + us, err := util.ParseUint64s(ss[1:]) + if err != nil { + return nil, err + } + + xfss.ExtendedPrecision, err = extendedPrecisionStats(us) + if err != nil { + return nil, err + } + + continue + } + + // All other counters are uint32 values. 
+ us, err := util.ParseUint32s(ss[1:]) + if err != nil { + return nil, err + } + + switch label { + case fieldExtentAlloc: + xfss.ExtentAllocation, err = extentAllocationStats(us) + case fieldAbt: + xfss.AllocationBTree, err = btreeStats(us) + case fieldBlkMap: + xfss.BlockMapping, err = blockMappingStats(us) + case fieldBmbt: + xfss.BlockMapBTree, err = btreeStats(us) + case fieldDir: + xfss.DirectoryOperation, err = directoryOperationStats(us) + case fieldTrans: + xfss.Transaction, err = transactionStats(us) + case fieldIg: + xfss.InodeOperation, err = inodeOperationStats(us) + case fieldLog: + xfss.LogOperation, err = logOperationStats(us) + case fieldRw: + xfss.ReadWrite, err = readWriteStats(us) + case fieldAttr: + xfss.AttributeOperation, err = attributeOperationStats(us) + case fieldIcluster: + xfss.InodeClustering, err = inodeClusteringStats(us) + case fieldVnodes: + xfss.Vnode, err = vnodeStats(us) + case fieldBuf: + xfss.Buffer, err = bufferStats(us) + } + if err != nil { + return nil, err + } + } + + return &xfss, s.Err() +} + +// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. +func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { + if l := len(us); l != 4 { + return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) + } + + return ExtentAllocationStats{ + ExtentsAllocated: us[0], + BlocksAllocated: us[1], + ExtentsFreed: us[2], + BlocksFreed: us[3], + }, nil +} + +// btreeStats builds a BTreeStats from a slice of uint32s. +func btreeStats(us []uint32) (BTreeStats, error) { + if l := len(us); l != 4 { + return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) + } + + return BTreeStats{ + Lookups: us[0], + Compares: us[1], + RecordsInserted: us[2], + RecordsDeleted: us[3], + }, nil +} + +// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. +func blockMappingStats(us []uint32) (BlockMappingStats, error) { + if l := len(us); l != 7 { + return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) + } + + return BlockMappingStats{ + Reads: us[0], + Writes: us[1], + Unmaps: us[2], + ExtentListInsertions: us[3], + ExtentListDeletions: us[4], + ExtentListLookups: us[5], + ExtentListCompares: us[6], + }, nil +} + +// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. +func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { + if l := len(us); l != 4 { + return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) + } + + return DirectoryOperationStats{ + Lookups: us[0], + Creates: us[1], + Removes: us[2], + Getdents: us[3], + }, nil +} + +// TransactionStats builds a TransactionStats from a slice of uint32s. +func transactionStats(us []uint32) (TransactionStats, error) { + if l := len(us); l != 3 { + return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) + } + + return TransactionStats{ + Sync: us[0], + Async: us[1], + Empty: us[2], + }, nil +} + +// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. 
+func inodeOperationStats(us []uint32) (InodeOperationStats, error) { + if l := len(us); l != 7 { + return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) + } + + return InodeOperationStats{ + Attempts: us[0], + Found: us[1], + Recycle: us[2], + Missed: us[3], + Duplicate: us[4], + Reclaims: us[5], + AttributeChange: us[6], + }, nil +} + +// LogOperationStats builds a LogOperationStats from a slice of uint32s. +func logOperationStats(us []uint32) (LogOperationStats, error) { + if l := len(us); l != 5 { + return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) + } + + return LogOperationStats{ + Writes: us[0], + Blocks: us[1], + NoInternalBuffers: us[2], + Force: us[3], + ForceSleep: us[4], + }, nil +} + +// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. +func readWriteStats(us []uint32) (ReadWriteStats, error) { + if l := len(us); l != 2 { + return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) + } + + return ReadWriteStats{ + Read: us[0], + Write: us[1], + }, nil +} + +// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. +func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { + if l := len(us); l != 4 { + return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) + } + + return AttributeOperationStats{ + Get: us[0], + Set: us[1], + Remove: us[2], + List: us[3], + }, nil +} + +// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. +func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { + if l := len(us); l != 3 { + return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) + } + + return InodeClusteringStats{ + Iflush: us[0], + Flush: us[1], + FlushInode: us[2], + }, nil +} + +// VnodeStats builds a VnodeStats from a slice of uint32s. +func vnodeStats(us []uint32) (VnodeStats, error) { + // The attribute "Free" appears to not be available on older XFS + // stats versions. Therefore, 7 or 8 elements may appear in + // this slice. + l := len(us) + if l != 7 && l != 8 { + return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) + } + + s := VnodeStats{ + Active: us[0], + Allocate: us[1], + Get: us[2], + Hold: us[3], + Release: us[4], + Reclaim: us[5], + Remove: us[6], + } + + // Skip adding free, unless it is present. The zero value will + // be used in place of an actual count. + if l == 7 { + return s, nil + } + + s.Free = us[7] + return s, nil +} + +// BufferStats builds a BufferStats from a slice of uint32s. +func bufferStats(us []uint32) (BufferStats, error) { + if l := len(us); l != 9 { + return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) + } + + return BufferStats{ + Get: us[0], + Create: us[1], + GetLocked: us[2], + GetLockedWaited: us[3], + BusyLocked: us[4], + MissLocked: us[5], + PageRetries: us[6], + PageFound: us[7], + GetRead: us[8], + }, nil +} + +// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. 
+func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { + if l := len(us); l != 3 { + return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) + } + + return ExtendedPrecisionStats{ + FlushBytes: us[0], + WriteBytes: us[1], + ReadBytes: us[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go new file mode 100644 index 0000000000..d86794b7ca --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package xfs provides access to statistics exposed by the XFS filesystem. +package xfs + +// Stats contains XFS filesystem runtime statistics, parsed from +// /proc/fs/xfs/stat. +// +// The names and meanings of each statistic were taken from +// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux +// kernel source. Most counters are uint32s (same data types used in +// xfs_stats.h), but some of the "extended precision stats" are uint64s. +type Stats struct { + // The name of the filesystem used to source these statistics. + // If empty, this indicates aggregated statistics for all XFS + // filesystems on the host. + Name string + + ExtentAllocation ExtentAllocationStats + AllocationBTree BTreeStats + BlockMapping BlockMappingStats + BlockMapBTree BTreeStats + DirectoryOperation DirectoryOperationStats + Transaction TransactionStats + InodeOperation InodeOperationStats + LogOperation LogOperationStats + ReadWrite ReadWriteStats + AttributeOperation AttributeOperationStats + InodeClustering InodeClusteringStats + Vnode VnodeStats + Buffer BufferStats + ExtendedPrecision ExtendedPrecisionStats +} + +// ExtentAllocationStats contains statistics regarding XFS extent allocations. +type ExtentAllocationStats struct { + ExtentsAllocated uint32 + BlocksAllocated uint32 + ExtentsFreed uint32 + BlocksFreed uint32 +} + +// BTreeStats contains statistics regarding an XFS internal B-tree. +type BTreeStats struct { + Lookups uint32 + Compares uint32 + RecordsInserted uint32 + RecordsDeleted uint32 +} + +// BlockMappingStats contains statistics regarding XFS block maps. +type BlockMappingStats struct { + Reads uint32 + Writes uint32 + Unmaps uint32 + ExtentListInsertions uint32 + ExtentListDeletions uint32 + ExtentListLookups uint32 + ExtentListCompares uint32 +} + +// DirectoryOperationStats contains statistics regarding XFS directory entries. +type DirectoryOperationStats struct { + Lookups uint32 + Creates uint32 + Removes uint32 + Getdents uint32 +} + +// TransactionStats contains statistics regarding XFS metadata transactions. +type TransactionStats struct { + Sync uint32 + Async uint32 + Empty uint32 +} + +// InodeOperationStats contains statistics regarding XFS inode operations. 
+type InodeOperationStats struct { + Attempts uint32 + Found uint32 + Recycle uint32 + Missed uint32 + Duplicate uint32 + Reclaims uint32 + AttributeChange uint32 +} + +// LogOperationStats contains statistics regarding the XFS log buffer. +type LogOperationStats struct { + Writes uint32 + Blocks uint32 + NoInternalBuffers uint32 + Force uint32 + ForceSleep uint32 +} + +// ReadWriteStats contains statistics regarding the number of read and write +// system calls for XFS filesystems. +type ReadWriteStats struct { + Read uint32 + Write uint32 +} + +// AttributeOperationStats contains statistics regarding manipulation of +// XFS extended file attributes. +type AttributeOperationStats struct { + Get uint32 + Set uint32 + Remove uint32 + List uint32 +} + +// InodeClusteringStats contains statistics regarding XFS inode clustering +// operations. +type InodeClusteringStats struct { + Iflush uint32 + Flush uint32 + FlushInode uint32 +} + +// VnodeStats contains statistics regarding XFS vnode operations. +type VnodeStats struct { + Active uint32 + Allocate uint32 + Get uint32 + Hold uint32 + Release uint32 + Reclaim uint32 + Remove uint32 + Free uint32 +} + +// BufferStats contains statistics regarding XFS read/write I/O buffers. +type BufferStats struct { + Get uint32 + Create uint32 + GetLocked uint32 + GetLockedWaited uint32 + BusyLocked uint32 + MissLocked uint32 + PageRetries uint32 + PageFound uint32 + GetRead uint32 +} + +// ExtendedPrecisionStats contains high precision counters used to track the +// total number of bytes read, written, or flushed, during XFS operations. +type ExtendedPrecisionStats struct { + FlushBytes uint64 + WriteBytes uint64 + ReadBytes uint64 +} diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore index 3b053c59ec..1b8c7c2611 100644 --- a/vendor/github.com/spf13/cobra/.gitignore +++ b/vendor/github.com/spf13/cobra/.gitignore @@ -34,5 +34,3 @@ tags *.exe cobra.test - -.idea/* diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml index 38b85f499c..5afcb20961 100644 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ b/vendor/github.com/spf13/cobra/.travis.yml @@ -1,31 +1,21 @@ language: go -stages: - - diff - - test - -go: - - 1.10.x - - 1.11.x - - 1.12.x - - tip - matrix: + include: + - go: 1.9.4 + - go: 1.10.0 + - go: tip allow_failures: - go: tip - include: - - stage: diff - go: 1.12.x - script: diff -u <(echo -n) <(gofmt -d -s .) before_install: - mkdir -p bin - - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.6.0/shellcheck + - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck - chmod +x bin/shellcheck - - go get -u github.com/kyoh86/richgo script: - - PATH=$PATH:$PWD/bin richgo test -v ./... + - PATH=$PATH:$PWD/bin go test -v ./... - go build + - diff -u <(echo -n) <(gofmt -d -s .) - if [ -z $NOVET ]; then - diff -u <(echo -n) <(go vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); + diff -u <(echo -n) <(go tool vet . 
2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); fi diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 60c5a425bc..851fcc087c 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -2,29 +2,25 @@ Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. -Many of the most widely used Go projects are built using Cobra, such as: -[Kubernetes](http://kubernetes.io/), -[Hugo](http://gohugo.io), -[rkt](https://github.com/coreos/rkt), -[etcd](https://github.com/coreos/etcd), -[Moby (former Docker)](https://github.com/moby/moby), -[Docker (distribution)](https://github.com/docker/distribution), -[OpenShift](https://www.openshift.com/), -[Delve](https://github.com/derekparker/delve), -[GopherJS](http://www.gopherjs.org/), -[CockroachDB](http://www.cockroachlabs.com/), -[Bleve](http://www.blevesearch.com/), -[ProjectAtomic (enterprise)](http://www.projectatomic.io/), -[Giant Swarm's gsctl](https://github.com/giantswarm/gsctl), -[Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack), -[rclone](http://rclone.org/), -[nehm](https://github.com/bogem/nehm), -[Pouch](https://github.com/alibaba/pouch), -[Istio](https://istio.io), -[Prototool](https://github.com/uber/prototool), -[mattermost-server](https://github.com/mattermost/mattermost-server), -[Gardener](https://github.com/gardener/gardenctl), -etc. +Many of the most widely used Go projects are built using Cobra including: + +* [Kubernetes](http://kubernetes.io/) +* [Hugo](http://gohugo.io) +* [rkt](https://github.com/coreos/rkt) +* [etcd](https://github.com/coreos/etcd) +* [Moby (former Docker)](https://github.com/moby/moby) +* [Docker (distribution)](https://github.com/docker/distribution) +* [OpenShift](https://www.openshift.com/) +* [Delve](https://github.com/derekparker/delve) +* [GopherJS](http://www.gopherjs.org/) +* [CockroachDB](http://www.cockroachlabs.com/) +* [Bleve](http://www.blevesearch.com/) +* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) +* [GiantSwarm's swarm](https://github.com/giantswarm/cli) +* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) +* [rclone](http://rclone.org/) +* [nehm](https://github.com/bogem/nehm) +* [Pouch](https://github.com/alibaba/pouch) [![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) [![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) @@ -49,7 +45,6 @@ etc. * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) * [Generating documentation for your command](#generating-documentation-for-your-command) * [Generating bash completions](#generating-bash-completions) - * [Generating zsh completions](#generating-zsh-completions) - [Contributing](#contributing) - [License](#license) @@ -157,6 +152,9 @@ In a Cobra app, typically the main.go file is very bare. It serves one purpose: package main import ( + "fmt" + "os" + "{pathToYourApp}/cmd" ) @@ -267,6 +265,9 @@ In a Cobra app, typically the main.go file is very bare. 
It serves, one purpose, package main import ( + "fmt" + "os" + "{pathToYourApp}/cmd" ) @@ -338,7 +339,7 @@ rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose out A flag can also be assigned locally which will only apply to that specific command. ```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") ``` ### Local Flag on Parent Commands @@ -394,7 +395,6 @@ The following validators are built in: - `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. - `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. - `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` - `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. An example of setting the custom validator: @@ -404,7 +404,7 @@ var cmd = &cobra.Command{ Short: "hello", Args: func(cmd *cobra.Command, args []string) error { if len(args) < 1 { - return errors.New("requires a color argument") + return errors.New("requires at least one arg") } if myapp.IsValidColor(args[0]) { return nil @@ -464,7 +464,7 @@ Echo works a lot like print, except it has a child command.`, } var cmdTimes = &cobra.Command{ - Use: "times [string to echo]", + Use: "times [# times] [string to echo]", Short: "Echo anything to the screen more times", Long: `echo things multiple times back to the user by providing a count and a string.`, @@ -721,11 +721,6 @@ Cobra can generate documentation based on subcommands, flags, etc. in the follow Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). -## Generating zsh completions - -Cobra can generate zsh-completion file. Read more about it in -[Zsh Completions](zsh_completions.md). - # Contributing 1. Fork it diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index c4d820b853..a5d8a9273e 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -78,18 +78,6 @@ func ExactArgs(n int) PositionalArgs { } } -// ExactValidArgs returns an error if -// there are not exactly N positional args OR -// there are any positional args that are not in the `ValidArgs` field of `Command` -func ExactValidArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if err := ExactArgs(n)(cmd, args); err != nil { - return err - } - return OnlyValidArgs(cmd, args) - } -} - // RangeArgs returns an error if the number of args is not within the expected range. 
func RangeArgs(min int, max int) PositionalArgs { return func(cmd *Command, args []string) error { diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 57bb8e1b3f..8fa8f486fa 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -129,13 +129,7 @@ __%[1]s_handle_reply() fi if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - if declare -F __%[1]s_custom_func >/dev/null; then - # try command name qualified custom func - __%[1]s_custom_func - else - # otherwise fall back to unqualified for compatibility - declare -F __custom_func >/dev/null && __custom_func - fi + declare -F __custom_func >/dev/null && __custom_func fi # available in bash-completion >= 2, not always present on macOS @@ -199,8 +193,7 @@ __%[1]s_handle_flag() fi # skip the argument to a two word flag - if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then - __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" + if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then c=$((c+1)) # if we are looking for a flags value, don't show commands if [[ $c -eq $cword ]]; then @@ -380,10 +373,6 @@ func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { } format += "\")\n" buf.WriteString(fmt.Sprintf(format, name)) - if len(flag.NoOptDefVal) == 0 { - format = " two_word_flags+=(\"--%s\")\n" - buf.WriteString(fmt.Sprintf(format, name)) - } writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) } @@ -545,3 +534,51 @@ func (c *Command) GenBashCompletionFile(filename string) error { return c.GenBashCompletion(outFile) } + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// and causes your command to report an error if invoked without the flag. +func (c *Command) MarkFlagRequired(name string) error { + return MarkFlagRequired(c.Flags(), name) +} + +// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists, +// and causes your command to report an error if invoked without the flag. +func (c *Command) MarkPersistentFlagRequired(name string) error { + return MarkFlagRequired(c.PersistentFlags(), name) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// and causes your command to report an error if invoked without the flag. +func MarkFlagRequired(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (c *Command) MarkFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.Flags(), name, extensions...) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. +func (c *Command) MarkFlagCustom(name string, f string) error { + return MarkFlagCustom(c.Flags(), name, f) +} + +// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. 
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.PersistentFlags(), name, extensions...) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { + return flags.SetAnnotation(name, BashCompFilenameExt, extensions) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. +func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { + return flags.SetAnnotation(name, BashCompCustom, []string{f}) +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md index 4ac61ee132..e79d4769d1 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -1,40 +1,5 @@ # Generating Bash Completions For Your Own cobra.Command -If you are using the generator you can create a completion command by running - -```bash -cobra add completion -``` - -Update the help text show how to install the bash_completion Linux show here [Kubectl docs show mac options](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion) - -Writing the shell script to stdout allows the most flexible use. - -```go -// completionCmd represents the completion command -var completionCmd = &cobra.Command{ - Use: "completion", - Short: "Generates bash completion scripts", - Long: `To load completion run - -. <(bitbucket completion) - -To configure your bash shell to load completions for each session add to your bashrc - -# ~/.bashrc or ~/.profile -. <(bitbucket completion) -`, - Run: func(cmd *cobra.Command, args []string) { - rootCmd.GenBashCompletion(os.Stdout); - }, -} -``` - -**Note:** The cobra generator may include messages printed to stdout for example if the config file is loaded, this will break the auto complete script - - -## Example from kubectl - Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: ```go @@ -82,7 +47,7 @@ __kubectl_get_resource() fi } -__kubectl_custom_func() { +__custom_func() { case ${last_command} in kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) __kubectl_get_resource @@ -109,7 +74,7 @@ Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, } ``` -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`___custom_func()`) to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. 
`__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! +The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! ## Have the completions code complete your 'nouns' diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index 6505c070b4..7010fd15b7 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -23,7 +23,6 @@ import ( "strconv" "strings" "text/template" - "time" "unicode" ) @@ -57,12 +56,6 @@ var MousetrapHelpText string = `This is a command line tool. You need to open cmd.exe and run it from there. ` -// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows -// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed. -// To disable the mousetrap, just set MousetrapHelpText to blank string (""). -// Works only on Microsoft Windows. -var MousetrapDisplayDuration time.Duration = 5 * time.Second - // AddTemplateFunc adds a template function that's available to Usage and Help // template generation. func AddTemplateFunc(name string, tmplFunc interface{}) { diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index c7e8983034..34d1bf3671 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -177,6 +177,8 @@ type Command struct { // that we can use on every pflag set and children commands globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName + // output is an output writer defined by user. + output io.Writer // usageFunc is usage func defined by user. usageFunc func(*Command) error // usageTemplate is usage template defined by user. @@ -193,13 +195,6 @@ type Command struct { helpCommand *Command // versionTemplate is the version template defined by user. versionTemplate string - - // inReader is a reader defined by the user that replaces stdin - inReader io.Reader - // outWriter is a writer defined by the user that replaces stdout - outWriter io.Writer - // errWriter is a writer defined by the user that replaces stderr - errWriter io.Writer } // SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden @@ -210,28 +205,8 @@ func (c *Command) SetArgs(a []string) { // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. -// Deprecated: Use SetOut and/or SetErr instead func (c *Command) SetOutput(output io.Writer) { - c.outWriter = output - c.errWriter = output -} - -// SetOut sets the destination for usage messages. -// If newOut is nil, os.Stdout is used. 
-func (c *Command) SetOut(newOut io.Writer) { - c.outWriter = newOut -} - -// SetErr sets the destination for error messages. -// If newErr is nil, os.Stderr is used. -func (c *Command) SetErr(newErr io.Writer) { - c.errWriter = newErr -} - -// SetOut sets the source for input data -// If newIn is nil, os.Stdin is used. -func (c *Command) SetIn(newIn io.Reader) { - c.inReader = newIn + c.output = output } // SetUsageFunc sets usage function. Usage can be defined by application. @@ -292,19 +267,9 @@ func (c *Command) OutOrStderr() io.Writer { return c.getOut(os.Stderr) } -// ErrOrStderr returns output to stderr -func (c *Command) ErrOrStderr() io.Writer { - return c.getErr(os.Stderr) -} - -// ErrOrStderr returns output to stderr -func (c *Command) InOrStdin() io.Reader { - return c.getIn(os.Stdin) -} - func (c *Command) getOut(def io.Writer) io.Writer { - if c.outWriter != nil { - return c.outWriter + if c.output != nil { + return c.output } if c.HasParent() { return c.parent.getOut(def) @@ -312,26 +277,6 @@ func (c *Command) getOut(def io.Writer) io.Writer { return def } -func (c *Command) getErr(def io.Writer) io.Writer { - if c.errWriter != nil { - return c.errWriter - } - if c.HasParent() { - return c.parent.getErr(def) - } - return def -} - -func (c *Command) getIn(def io.Reader) io.Reader { - if c.inReader != nil { - return c.inReader - } - if c.HasParent() { - return c.parent.getIn(def) - } - return def -} - // UsageFunc returns either the function set by SetUsageFunc for this command // or a parent, or it returns a default usage function. func (c *Command) UsageFunc() (f func(*Command) error) { @@ -384,22 +329,13 @@ func (c *Command) Help() error { return nil } -// UsageString returns usage string. +// UsageString return usage string. func (c *Command) UsageString() string { - // Storing normal writers - tmpOutput := c.outWriter - tmpErr := c.errWriter - + tmpOutput := c.output bb := new(bytes.Buffer) - c.outWriter = bb - c.errWriter = bb - + c.SetOutput(bb) c.Usage() - - // Setting things back to normal - c.outWriter = tmpOutput - c.errWriter = tmpErr - + c.output = tmpOutput return bb.String() } @@ -881,11 +817,13 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // overriding c.InitDefaultHelpCmd() - args := c.args + var args []string // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { args = os.Args[1:] + } else { + args = c.args } var flags []string @@ -1132,21 +1070,6 @@ func (c *Command) Printf(format string, i ...interface{}) { c.Print(fmt.Sprintf(format, i...)) } -// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErr(i ...interface{}) { - fmt.Fprint(c.ErrOrStderr(), i...) -} - -// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErrln(i ...interface{}) { - c.Print(fmt.Sprintln(i...)) -} - -// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErrf(format string, i ...interface{}) { - c.Print(fmt.Sprintf(format, i...)) -} - // CommandPath returns the full path to this command. func (c *Command) CommandPath() string { if c.HasParent() { @@ -1412,7 +1335,7 @@ func (c *Command) LocalFlags() *flag.FlagSet { return c.lflags } -// InheritedFlags returns all flags which were inherited from parent commands. 
+// InheritedFlags returns all flags which were inherited from parents commands. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go index 8768b1736d..edec728e4f 100644 --- a/vendor/github.com/spf13/cobra/command_win.go +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -3,7 +3,6 @@ package cobra import ( - "fmt" "os" "time" @@ -15,12 +14,7 @@ var preExecHookFn = preExecHook func preExecHook(c *Command) { if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { c.Print(MousetrapHelpText) - if MousetrapDisplayDuration > 0 { - time.Sleep(MousetrapDisplayDuration) - } else { - c.Println("Press return to continue...") - fmt.Scanln() - } + time.Sleep(5 * time.Second) os.Exit(1) } } diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod deleted file mode 100644 index 9a9eb65a37..0000000000 --- a/vendor/github.com/spf13/cobra/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/spf13/cobra - -go 1.12 - -require ( - github.com/BurntSushi/toml v0.3.1 // indirect - github.com/cpuguy83/go-md2man v1.0.10 - github.com/inconshreveable/mousetrap v1.0.0 - github.com/mitchellh/go-homedir v1.1.0 - github.com/spf13/pflag v1.0.3 - github.com/spf13/viper v1.3.2 - gopkg.in/yaml.v2 v2.2.2 -) diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum deleted file mode 100644 index 9761f4d03f..0000000000 --- a/vendor/github.com/spf13/cobra/go.sum +++ /dev/null @@ -1,51 +0,0 @@ -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir 
v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go deleted file mode 100644 index 756c61b9dc..0000000000 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ /dev/null @@ -1,100 +0,0 @@ -// PowerShell completions are based on the amazing work from clap: -// https://github.com/clap-rs/clap/blob/3294d18efe5f264d12c9035f404c7d189d4824e1/src/completions/powershell.rs -// -// The generated scripts require PowerShell v5.0+ (which comes Windows 
10, but -// can be downloaded separately for windows 7 or 8.1). - -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "strings" - - "github.com/spf13/pflag" -) - -var powerShellCompletionTemplate = `using namespace System.Management.Automation -using namespace System.Management.Automation.Language -Register-ArgumentCompleter -Native -CommandName '%s' -ScriptBlock { - param($wordToComplete, $commandAst, $cursorPosition) - $commandElements = $commandAst.CommandElements - $command = @( - '%s' - for ($i = 1; $i -lt $commandElements.Count; $i++) { - $element = $commandElements[$i] - if ($element -isnot [StringConstantExpressionAst] -or - $element.StringConstantType -ne [StringConstantType]::BareWord -or - $element.Value.StartsWith('-')) { - break - } - $element.Value - } - ) -join ';' - $completions = @(switch ($command) {%s - }) - $completions.Where{ $_.CompletionText -like "$wordToComplete*" } | - Sort-Object -Property ListItemText -}` - -func generatePowerShellSubcommandCases(out io.Writer, cmd *Command, previousCommandName string) { - var cmdName string - if previousCommandName == "" { - cmdName = cmd.Name() - } else { - cmdName = fmt.Sprintf("%s;%s", previousCommandName, cmd.Name()) - } - - fmt.Fprintf(out, "\n '%s' {", cmdName) - - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - usage := escapeStringForPowerShell(flag.Usage) - if len(flag.Shorthand) > 0 { - fmt.Fprintf(out, "\n [CompletionResult]::new('-%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Shorthand, flag.Shorthand, usage) - } - fmt.Fprintf(out, "\n [CompletionResult]::new('--%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Name, flag.Name, usage) - }) - - for _, subCmd := range cmd.Commands() { - usage := escapeStringForPowerShell(subCmd.Short) - fmt.Fprintf(out, "\n [CompletionResult]::new('%s', '%s', [CompletionResultType]::ParameterValue, '%s')", subCmd.Name(), subCmd.Name(), usage) - } - - fmt.Fprint(out, "\n break\n }") - - for _, subCmd := range cmd.Commands() { - generatePowerShellSubcommandCases(out, subCmd, cmdName) - } -} - -func escapeStringForPowerShell(s string) string { - return strings.Replace(s, "'", "''", -1) -} - -// GenPowerShellCompletion generates PowerShell completion file and writes to the passed writer. -func (c *Command) GenPowerShellCompletion(w io.Writer) error { - buf := new(bytes.Buffer) - - var subCommandCases bytes.Buffer - generatePowerShellSubcommandCases(&subCommandCases, c, "") - fmt.Fprintf(buf, powerShellCompletionTemplate, c.Name(), c.Name(), subCommandCases.String()) - - _, err := buf.WriteTo(w) - return err -} - -// GenPowerShellCompletionFile generates PowerShell completion file. -func (c *Command) GenPowerShellCompletionFile(filename string) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenPowerShellCompletion(outFile) -} diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md deleted file mode 100644 index afed802408..0000000000 --- a/vendor/github.com/spf13/cobra/powershell_completions.md +++ /dev/null @@ -1,14 +0,0 @@ -# Generating PowerShell Completions For Your Own cobra.Command - -Cobra can generate PowerShell completion scripts. Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. 
They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. - -# What's supported - -- Completion for subcommands using their `.Short` description -- Completion for non-hidden flags using their `.Name` and `.Shorthand` - -# What's not yet supported - -- Command aliases -- Required, filename or custom flags (they will work like normal flags) -- Custom completion scripts diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go deleted file mode 100644 index ba0af9cb55..0000000000 --- a/vendor/github.com/spf13/cobra/shell_completions.go +++ /dev/null @@ -1,85 +0,0 @@ -package cobra - -import ( - "github.com/spf13/pflag" -) - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(c.Flags(), name) -} - -// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkPersistentFlagRequired(name string) error { - return MarkFlagRequired(c.PersistentFlags(), name) -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, -// and causes your command to report an error if invoked without the flag. -func MarkFlagRequired(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (c *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.Flags(), name, extensions...) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. -func (c *Command) MarkFlagCustom(name string, f string) error { - return MarkFlagCustom(c.Flags(), name, f) -} - -// MarkPersistentFlagFilename instructs the various shell completion -// implementations to limit completions for this persistent flag to the -// specified extensions (patterns). -// -// Shell Completion compatibility matrix: bash, zsh -func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.PersistentFlags(), name, extensions...) -} - -// MarkFlagFilename instructs the various shell completion implementations to -// limit completions for this flag to the specified extensions (patterns). -// -// Shell Completion compatibility matrix: bash, zsh -func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { - return flags.SetAnnotation(name, BashCompFilenameExt, extensions) -} - -// MarkFlagCustom instructs the various shell completion implementations to -// limit completions for this flag to the specified extensions (patterns). 
-// -// Shell Completion compatibility matrix: bash, zsh -func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { - return flags.SetAnnotation(name, BashCompCustom, []string{f}) -} - -// MarkFlagDirname instructs the various shell completion implementations to -// complete only directories with this named flag. -// -// Shell Completion compatibility matrix: zsh -func (c *Command) MarkFlagDirname(name string) error { - return MarkFlagDirname(c.Flags(), name) -} - -// MarkPersistentFlagDirname instructs the various shell completion -// implementations to complete only directories with this persistent named flag. -// -// Shell Completion compatibility matrix: zsh -func (c *Command) MarkPersistentFlagDirname(name string) error { - return MarkFlagDirname(c.PersistentFlags(), name) -} - -// MarkFlagDirname instructs the various shell completion implementations to -// complete only directories with this specified flag. -// -// Shell Completion compatibility matrix: zsh -func MarkFlagDirname(flags *pflag.FlagSet, name string) error { - zshPattern := "-(/)" - return flags.SetAnnotation(name, zshCompDirname, []string{zshPattern}) -} diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 12755482f0..889c22e273 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -1,102 +1,13 @@ package cobra import ( - "encoding/json" + "bytes" "fmt" "io" "os" - "sort" "strings" - "text/template" - - "github.com/spf13/pflag" -) - -const ( - zshCompArgumentAnnotation = "cobra_annotations_zsh_completion_argument_annotation" - zshCompArgumentFilenameComp = "cobra_annotations_zsh_completion_argument_file_completion" - zshCompArgumentWordComp = "cobra_annotations_zsh_completion_argument_word_completion" - zshCompDirname = "cobra_annotations_zsh_dirname" -) - -var ( - zshCompFuncMap = template.FuncMap{ - "genZshFuncName": zshCompGenFuncName, - "extractFlags": zshCompExtractFlag, - "genFlagEntryForZshArguments": zshCompGenFlagEntryForArguments, - "extractArgsCompletions": zshCompExtractArgumentCompletionHintsForRendering, - } - zshCompletionText = ` -{{/* should accept Command (that contains subcommands) as parameter */}} -{{define "argumentsC" -}} -{{ $cmdPath := genZshFuncName .}} -function {{$cmdPath}} { - local -a commands - - _arguments -C \{{- range extractFlags .}} - {{genFlagEntryForZshArguments .}} \{{- end}} - "1: :->cmnds" \ - "*::arg:->args" - - case $state in - cmnds) - commands=({{range .Commands}}{{if not .Hidden}} - "{{.Name}}:{{.Short}}"{{end}}{{end}} - ) - _describe "command" commands - ;; - esac - - case "$words[1]" in {{- range .Commands}}{{if not .Hidden}} - {{.Name}}) - {{$cmdPath}}_{{.Name}} - ;;{{end}}{{end}} - esac -} -{{range .Commands}}{{if not .Hidden}} -{{template "selectCmdTemplate" .}} -{{- end}}{{end}} -{{- end}} - -{{/* should accept Command without subcommands as parameter */}} -{{define "arguments" -}} -function {{genZshFuncName .}} { -{{" _arguments"}}{{range extractFlags .}} \ - {{genFlagEntryForZshArguments . 
-}} -{{end}}{{range extractArgsCompletions .}} \ - {{.}}{{end}} -} -{{end}} - -{{/* dispatcher for commands with or without subcommands */}} -{{define "selectCmdTemplate" -}} -{{if .Hidden}}{{/* ignore hidden*/}}{{else -}} -{{if .Commands}}{{template "argumentsC" .}}{{else}}{{template "arguments" .}}{{end}} -{{- end}} -{{- end}} - -{{/* template entry point */}} -{{define "Main" -}} -#compdef _{{.Name}} {{.Name}} - -{{template "selectCmdTemplate" .}} -{{end}} -` ) -// zshCompArgsAnnotation is used to encode/decode zsh completion for -// arguments to/from Command.Annotations. -type zshCompArgsAnnotation map[int]zshCompArgHint - -type zshCompArgHint struct { - // Indicates the type of the completion to use. One of: - // zshCompArgumentFilenameComp or zshCompArgumentWordComp - Tipe string `json:"type"` - - // A value for the type above (globs for file completion or words) - Options []string `json:"options"` -} - // GenZshCompletionFile generates zsh completion file. func (c *Command) GenZshCompletionFile(filename string) error { outFile, err := os.Create(filename) @@ -108,229 +19,108 @@ func (c *Command) GenZshCompletionFile(filename string) error { return c.GenZshCompletion(outFile) } -// GenZshCompletion generates a zsh completion file and writes to the passed -// writer. The completion always run on the root command regardless of the -// command it was called from. +// GenZshCompletion generates a zsh completion file and writes to the passed writer. func (c *Command) GenZshCompletion(w io.Writer) error { - tmpl, err := template.New("Main").Funcs(zshCompFuncMap).Parse(zshCompletionText) - if err != nil { - return fmt.Errorf("error creating zsh completion template: %v", err) - } - return tmpl.Execute(w, c.Root()) -} - -// MarkZshCompPositionalArgumentFile marks the specified argument (first -// argument is 1) as completed by file selection. patterns (e.g. "*.txt") are -// optional - if not provided the completion will search for all files. -func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error { - if argPosition < 1 { - return fmt.Errorf("Invalid argument position (%d)", argPosition) - } - annotation, err := c.zshCompGetArgsAnnotations() - if err != nil { - return err - } - if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) { - return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition) - } - annotation[argPosition] = zshCompArgHint{ - Tipe: zshCompArgumentFilenameComp, - Options: patterns, - } - return c.zshCompSetArgsAnnotations(annotation) -} - -// MarkZshCompPositionalArgumentWords marks the specified positional argument -// (first argument is 1) as completed by the provided words. At east one word -// must be provided, spaces within words will be offered completion with -// "word\ word". 
-func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error { - if argPosition < 1 { - return fmt.Errorf("Invalid argument position (%d)", argPosition) - } - if len(words) == 0 { - return fmt.Errorf("Trying to set empty word list for positional argument %d", argPosition) - } - annotation, err := c.zshCompGetArgsAnnotations() - if err != nil { - return err - } - if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) { - return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition) - } - annotation[argPosition] = zshCompArgHint{ - Tipe: zshCompArgumentWordComp, - Options: words, - } - return c.zshCompSetArgsAnnotations(annotation) -} + buf := new(bytes.Buffer) -func zshCompExtractArgumentCompletionHintsForRendering(c *Command) ([]string, error) { - var result []string - annotation, err := c.zshCompGetArgsAnnotations() - if err != nil { - return nil, err - } - for k, v := range annotation { - s, err := zshCompRenderZshCompArgHint(k, v) - if err != nil { - return nil, err - } - result = append(result, s) - } - if len(c.ValidArgs) > 0 { - if _, positionOneExists := annotation[1]; !positionOneExists { - s, err := zshCompRenderZshCompArgHint(1, zshCompArgHint{ - Tipe: zshCompArgumentWordComp, - Options: c.ValidArgs, - }) - if err != nil { - return nil, err - } - result = append(result, s) - } - } - sort.Strings(result) - return result, nil -} + writeHeader(buf, c) + maxDepth := maxDepth(c) + writeLevelMapping(buf, maxDepth) + writeLevelCases(buf, maxDepth, c) -func zshCompRenderZshCompArgHint(i int, z zshCompArgHint) (string, error) { - switch t := z.Tipe; t { - case zshCompArgumentFilenameComp: - var globs []string - for _, g := range z.Options { - globs = append(globs, fmt.Sprintf(`-g "%s"`, g)) - } - return fmt.Sprintf(`'%d: :_files %s'`, i, strings.Join(globs, " ")), nil - case zshCompArgumentWordComp: - var words []string - for _, w := range z.Options { - words = append(words, fmt.Sprintf("%q", w)) - } - return fmt.Sprintf(`'%d: :(%s)'`, i, strings.Join(words, " ")), nil - default: - return "", fmt.Errorf("Invalid zsh argument completion annotation: %s", t) - } + _, err := buf.WriteTo(w) + return err } -func (c *Command) zshcompArgsAnnotationnIsDuplicatePosition(annotation zshCompArgsAnnotation, position int) bool { - _, dup := annotation[position] - return dup +func writeHeader(w io.Writer, cmd *Command) { + fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name()) } -func (c *Command) zshCompGetArgsAnnotations() (zshCompArgsAnnotation, error) { - annotation := make(zshCompArgsAnnotation) - annotationString, ok := c.Annotations[zshCompArgumentAnnotation] - if !ok { - return annotation, nil +func maxDepth(c *Command) int { + if len(c.Commands()) == 0 { + return 0 } - err := json.Unmarshal([]byte(annotationString), &annotation) - if err != nil { - return annotation, fmt.Errorf("Error unmarshaling zsh argument annotation: %v", err) - } - return annotation, nil -} - -func (c *Command) zshCompSetArgsAnnotations(annotation zshCompArgsAnnotation) error { - jsn, err := json.Marshal(annotation) - if err != nil { - return fmt.Errorf("Error marshaling zsh argument annotation: %v", err) - } - if c.Annotations == nil { - c.Annotations = make(map[string]string) + maxDepthSub := 0 + for _, s := range c.Commands() { + subDepth := maxDepth(s) + if subDepth > maxDepthSub { + maxDepthSub = subDepth + } } - c.Annotations[zshCompArgumentAnnotation] = string(jsn) - return nil + return 1 + maxDepthSub } -func zshCompGenFuncName(c *Command) string 
{ - if c.HasParent() { - return zshCompGenFuncName(c.Parent()) + "_" + c.Name() +func writeLevelMapping(w io.Writer, numLevels int) { + fmt.Fprintln(w, `_arguments \`) + for i := 1; i <= numLevels; i++ { + fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i) + fmt.Fprintln(w) } - return "_" + c.Name() + fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files") + fmt.Fprintln(w) } -func zshCompExtractFlag(c *Command) []*pflag.Flag { - var flags []*pflag.Flag - c.LocalFlags().VisitAll(func(f *pflag.Flag) { - if !f.Hidden { - flags = append(flags, f) - } - }) - c.InheritedFlags().VisitAll(func(f *pflag.Flag) { - if !f.Hidden { - flags = append(flags, f) - } - }) - return flags -} +func writeLevelCases(w io.Writer, maxDepth int, root *Command) { + fmt.Fprintln(w, "case $state in") + defer fmt.Fprintln(w, "esac") -// zshCompGenFlagEntryForArguments returns an entry that matches _arguments -// zsh-completion parameters. It's too complicated to generate in a template. -func zshCompGenFlagEntryForArguments(f *pflag.Flag) string { - if f.Name == "" || f.Shorthand == "" { - return zshCompGenFlagEntryForSingleOptionFlag(f) + for i := 1; i <= maxDepth; i++ { + fmt.Fprintf(w, " level%d)\n", i) + writeLevel(w, root, i) + fmt.Fprintln(w, " ;;") } - return zshCompGenFlagEntryForMultiOptionFlag(f) + fmt.Fprintln(w, " *)") + fmt.Fprintln(w, " _arguments '*: :_files'") + fmt.Fprintln(w, " ;;") } -func zshCompGenFlagEntryForSingleOptionFlag(f *pflag.Flag) string { - var option, multiMark, extras string +func writeLevel(w io.Writer, root *Command, i int) { + fmt.Fprintf(w, " case $words[%d] in\n", i) + defer fmt.Fprintln(w, " esac") - if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) { - multiMark = "*" - } + commands := filterByLevel(root, i) + byParent := groupByParent(commands) - option = "--" + f.Name - if option == "--" { - option = "-" + f.Shorthand + for p, c := range byParent { + names := names(c) + fmt.Fprintf(w, " %s)\n", p) + fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " ")) + fmt.Fprintln(w, " ;;") } - extras = zshCompGenFlagEntryExtras(f) + fmt.Fprintln(w, " *)") + fmt.Fprintln(w, " _arguments '*: :_files'") + fmt.Fprintln(w, " ;;") - return fmt.Sprintf(`'%s%s[%s]%s'`, multiMark, option, zshCompQuoteFlagDescription(f.Usage), extras) } -func zshCompGenFlagEntryForMultiOptionFlag(f *pflag.Flag) string { - var options, parenMultiMark, curlyMultiMark, extras string - - if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) { - parenMultiMark = "*" - curlyMultiMark = "\\*" +func filterByLevel(c *Command, l int) []*Command { + cs := make([]*Command, 0) + if l == 0 { + cs = append(cs, c) + return cs } - - options = fmt.Sprintf(`'(%s-%s %s--%s)'{%s-%s,%s--%s}`, - parenMultiMark, f.Shorthand, parenMultiMark, f.Name, curlyMultiMark, f.Shorthand, curlyMultiMark, f.Name) - extras = zshCompGenFlagEntryExtras(f) - - return fmt.Sprintf(`%s'[%s]%s'`, options, zshCompQuoteFlagDescription(f.Usage), extras) -} - -func zshCompGenFlagEntryExtras(f *pflag.Flag) string { - if f.NoOptDefVal != "" { - return "" + for _, s := range c.Commands() { + cs = append(cs, filterByLevel(s, l-1)...) 
} + return cs +} - extras := ":" // allow options for flag (even without assistance) - for key, values := range f.Annotations { - switch key { - case zshCompDirname: - extras = fmt.Sprintf(":filename:_files -g %q", values[0]) - case BashCompFilenameExt: - extras = ":filename:_files" - for _, pattern := range values { - extras = extras + fmt.Sprintf(` -g "%s"`, pattern) - } +func groupByParent(commands []*Command) map[string][]*Command { + m := make(map[string][]*Command) + for _, c := range commands { + parent := c.Parent() + if parent == nil { + continue } + m[parent.Name()] = append(m[parent.Name()], c) } - - return extras -} - -func zshCompFlagCouldBeSpecifiedMoreThenOnce(f *pflag.Flag) bool { - return strings.Contains(f.Value.Type(), "Slice") || - strings.Contains(f.Value.Type(), "Array") + return m } -func zshCompQuoteFlagDescription(s string) string { - return strings.Replace(s, "'", `'\''`, -1) +func names(commands []*Command) []string { + ns := make([]string, len(commands)) + for i, c := range commands { + ns[i] = c.Name() + } + return ns } diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md deleted file mode 100644 index df9c2eac93..0000000000 --- a/vendor/github.com/spf13/cobra/zsh_completions.md +++ /dev/null @@ -1,39 +0,0 @@ -## Generating Zsh Completion for your cobra.Command - -Cobra supports native Zsh completion generated from the root `cobra.Command`. -The generated completion script should be put somewhere in your `$fpath` named -`_`. - -### What's Supported - -* Completion for all non-hidden subcommands using their `.Short` description. -* Completion for all non-hidden flags using the following rules: - * Filename completion works by marking the flag with `cmd.MarkFlagFilename...` - family of commands. - * The requirement for argument to the flag is decided by the `.NoOptDefVal` - flag value - if it's empty then completion will expect an argument. - * Flags of one of the various `*Array` and `*Slice` types supports multiple - specifications (with or without argument depending on the specific type). -* Completion of positional arguments using the following rules: - * Argument position for all options below starts at `1`. If argument position - `0` is requested it will raise an error. - * Use `command.MarkZshCompPositionalArgumentFile` to complete filenames. Glob - patterns (e.g. `"*.log"`) are optional - if not specified it will offer to - complete all file types. - * Use `command.MarkZshCompPositionalArgumentWords` to offer specific words for - completion. At least one word is required. - * It's possible to specify completion for some arguments and leave some - unspecified (e.g. offer words for second argument but nothing for first - argument). This will cause no completion for first argument but words - completion for second argument. - * If no argument completion was specified for 1st argument (but optionally was - specified for 2nd) and the command has `ValidArgs` it will be used as - completion options for 1st argument. - * Argument completions only offered for commands with no subcommands. - -### What's not yet Supported - -* Custom completion scripts are not supported yet (We should probably create zsh - specific one, doesn't make sense to re-use the bash one as the functions will - be different). 
-* Whatever other feature you're looking for and doesn't exist :) diff --git a/vendor/go.opencensus.io/.travis.yml b/vendor/go.opencensus.io/.travis.yml index bd6b66ee83..73c8571c33 100644 --- a/vendor/go.opencensus.io/.travis.yml +++ b/vendor/go.opencensus.io/.travis.yml @@ -1,17 +1,27 @@ language: go -go_import_path: go.opencensus.io - go: + # 1.8 is tested by AppVeyor - 1.11.x -env: - global: - GO111MODULE=on +go_import_path: go.opencensus.io + +# Don't email me the results of the test runs. +notifications: + email: false before_script: - - make install-tools + - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any + - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any + - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh # Install latest dep release + - go get github.com/rakyll/embedmd script: - - make travis-ci - - go run internal/check/version.go # TODO move this to makefile + - embedmd -d README.md # Ensure embedded code is up-to-date + - go build ./... # Ensure dependency updates don't break build + - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi + - go vet ./... + - go test -v -race $PKGS # Run all the tests with the race detector enabled + - GOARCH=386 go test -v $PKGS # Run all tests against a 386 architecture + - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' + - go run internal/check/version.go diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md index 1ba3962c8b..3f3aed3961 100644 --- a/vendor/go.opencensus.io/CONTRIBUTING.md +++ b/vendor/go.opencensus.io/CONTRIBUTING.md @@ -41,8 +41,7 @@ git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git Run tests: ``` -$ make install-tools # Only first time. -$ make +$ go test ./... ``` Checkout a new branch, make modifications and push the branch to your fork: @@ -55,9 +54,3 @@ $ git push fork feature ``` Open a pull request against the main opencensus-go repo. - -## General Notes -This project uses Appveyor and Travis for CI. - -The dependencies are managed with `go mod` if you work with the sources under your -`$GOPATH` you need to set the environment variable `GO111MODULE=on`. \ No newline at end of file diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile deleted file mode 100644 index 457866cb1f..0000000000 --- a/vendor/go.opencensus.io/Makefile +++ /dev/null @@ -1,96 +0,0 @@ -# TODO: Fix this on windows. -ALL_SRC := $(shell find . -name '*.go' \ - -not -path './vendor/*' \ - -not -path '*/gen-go/*' \ - -type f | sort) -ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) - -GOTEST_OPT?=-v -race -timeout 30s -GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic -GOTEST=go test -GOFMT=gofmt -GOLINT=golint -GOVET=go vet -EMBEDMD=embedmd -# TODO decide if we need to change these names. -TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" -TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" -README_FILES := $(shell find . 
-name '*README.md' | sort | tr '\n' ' ') - -.DEFAULT_GOAL := fmt-lint-vet-embedmd-test - -.PHONY: fmt-lint-vet-embedmd-test -fmt-lint-vet-embedmd-test: fmt lint vet embedmd test - -# TODO enable test-with-coverage in tavis -.PHONY: travis-ci -travis-ci: fmt lint vet embedmd test test-386 - -all-pkgs: - @echo $(ALL_PKGS) | tr ' ' '\n' | sort - -all-srcs: - @echo $(ALL_SRC) | tr ' ' '\n' | sort - -.PHONY: test -test: - $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) - -.PHONY: test-386 -test-386: - GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) - -.PHONY: test-with-coverage -test-with-coverage: - $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) - -.PHONY: fmt -fmt: - @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \ - if [ "$$FMTOUT" ]; then \ - echo "$(GOFMT) FAILED => gofmt the following files:\n"; \ - echo "$$FMTOUT\n"; \ - exit 1; \ - else \ - echo "Fmt finished successfully"; \ - fi - -.PHONY: lint -lint: - @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \ - if [ "$$LINTOUT" ]; then \ - echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ - echo "$$LINTOUT\n"; \ - exit 1; \ - else \ - echo "Lint finished successfully"; \ - fi - -.PHONY: vet -vet: - # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" - @VETOUT=`$(GOVET) ./... | grep -v "go: downloading" 2>&1`; \ - if [ "$$VETOUT" ]; then \ - echo "$(GOVET) FAILED => go vet the following files:\n"; \ - echo "$$VETOUT\n"; \ - exit 1; \ - else \ - echo "Vet finished successfully"; \ - fi - -.PHONY: embedmd -embedmd: - @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ - if [ "$$EMBEDMDOUT" ]; then \ - echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ - echo "$$EMBEDMDOUT\n"; \ - exit 1; \ - else \ - echo "Embedmd finished successfully"; \ - fi - -.PHONY: install-tools -install-tools: - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/lint/golint - go get -u github.com/rakyll/embedmd diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md index 3f40ed5cbb..97d66983d3 100644 --- a/vendor/go.opencensus.io/README.md +++ b/vendor/go.opencensus.io/README.md @@ -29,7 +29,7 @@ integration with your RPC framework: * [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) * [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) -* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql) +* [database/sql](https://godoc.org/github.com/basvanbeek/ocsql) * [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) * [Groupcache](https://godoc.org/github.com/orijtech/groupcache) * [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) @@ -123,7 +123,7 @@ Currently three types of aggregations are supported: [embedmd]:# (internal/readme/stats.go aggs) ```go -distAgg := view.Distribution(1<<32, 2<<32, 3<<32) +distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32) countAgg := view.Count() sumAgg := view.Sum() ``` @@ -136,7 +136,7 @@ if err := view.Register(&view.View{ Name: "example.com/video_size_distribution", Description: "distribution of processed video size over time", Measure: videoSize, - Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), + Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32), }); err != nil { log.Fatalf("Failed to register view: %v", err) } diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml index 12bd7c4c73..98057888a3 100644 --- a/vendor/go.opencensus.io/appveyor.yml +++ 
b/vendor/go.opencensus.io/appveyor.yml @@ -12,7 +12,6 @@ environment: install: - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - choco upgrade golang --version 1.11.5 # Temporary fix because of a go.sum bug in 1.11 - go version - go env diff --git a/vendor/go.opencensus.io/exemplar/exemplar.go b/vendor/go.opencensus.io/exemplar/exemplar.go new file mode 100644 index 0000000000..e676df837f --- /dev/null +++ b/vendor/go.opencensus.io/exemplar/exemplar.go @@ -0,0 +1,78 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package exemplar implements support for exemplars. Exemplars are additional +// data associated with each measurement. +// +// Their purpose it to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. +package exemplar + +import ( + "context" + "time" +) + +const ( + KeyTraceID = "trace_id" + KeySpanID = "span_id" + KeyPrefixTag = "tag:" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. +type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. +// The map should only be mutated from AttachmentExtractor functions. +type Attachments map[string]string + +// AttachmentExtractor is a function capable of extracting exemplar attachments +// from the context used to record measurements. +// The map passed to the function should be mutated and returned. It will +// initially be nil: the first AttachmentExtractor that would like to add keys to the +// map is responsible for initializing it. +type AttachmentExtractor func(ctx context.Context, a Attachments) Attachments + +var extractors []AttachmentExtractor + +// RegisterAttachmentExtractor registers the given extractor associated with the exemplar +// type name. +// +// Extractors will be used to attempt to extract exemplars from the context +// associated with each recorded measurement. +// +// Packages that support exemplars should register their extractor functions on +// initialization. +// +// RegisterAttachmentExtractor should not be called after any measurements have +// been recorded. +func RegisterAttachmentExtractor(e AttachmentExtractor) { + extractors = append(extractors, e) +} + +// NewFromContext extracts exemplars from the given context. +// Each registered AttachmentExtractor (see RegisterAttachmentExtractor) is called in an +// unspecified order to add attachments to the exemplar. 
+func AttachmentsFromContext(ctx context.Context) Attachments { + var a Attachments + for _, extractor := range extractors { + a = extractor(ctx, a) + } + return a +} diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod index 8b7d38e91b..1236f4c2f3 100644 --- a/vendor/go.opencensus.io/go.mod +++ b/vendor/go.opencensus.io/go.mod @@ -1,10 +1,25 @@ module go.opencensus.io require ( + git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 + github.com/ghodss/yaml v1.0.0 // indirect + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect github.com/golang/protobuf v1.2.0 github.com/google/go-cmp v0.2.0 - github.com/hashicorp/golang-lru v0.5.0 - golang.org/x/net v0.0.0-20190311183353-d8887717615a - google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 // indirect - google.golang.org/grpc v1.19.0 + github.com/grpc-ecosystem/grpc-gateway v1.5.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 + github.com/openzipkin/zipkin-go v0.1.1 + github.com/prometheus/client_golang v0.8.0 + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 + github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e + github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 + golang.org/x/net v0.0.0-20180906233101-161cd47e91fd + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f + golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e + golang.org/x/text v0.3.0 + google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf + google.golang.org/genproto v0.0.0-20180831171423-11092d34479b + google.golang.org/grpc v1.14.0 + gopkg.in/yaml.v2 v2.2.1 // indirect ) diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum index cbb37036d4..3e0bab8845 100644 --- a/vendor/go.opencensus.io/go.sum +++ b/vendor/go.opencensus.io/go.sum @@ -1,50 +1,48 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +git.apache.org/thrift.git v0.0.0-20180807212849-6e67faa92827/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 h1:sihTnRgTOUSCQz0iS0pjZuFQy/z7GXCJgSBg3+rZKHw= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.0 
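Editorial note on the newly vendored exemplar package above: it exposes a small extension point in which packages register an AttachmentExtractor at init time and AttachmentsFromContext then runs every registered extractor against the recording context. The sketch below is only an illustration of that registration pattern, not code from this change; the requestIDKey context key and the "request_id" attachment name are hypothetical.

```go
package myinstrumentation

import (
	"context"

	"go.opencensus.io/exemplar"
)

// ctxKey/requestIDKey are hypothetical, used only for this sketch.
type ctxKey struct{}

var requestIDKey ctxKey

func init() {
	exemplar.RegisterAttachmentExtractor(func(ctx context.Context, a exemplar.Attachments) exemplar.Attachments {
		id, ok := ctx.Value(requestIDKey).(string)
		if !ok {
			return a
		}
		// The first extractor that wants to add keys is responsible for
		// initializing the (initially nil) attachments map.
		if a == nil {
			a = make(exemplar.Attachments)
		}
		a[exemplar.KeyPrefixTag+"request_id"] = id
		return a
	})
}
```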
h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/openzipkin/zipkin-go v0.1.1 h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +golang.org/x/net v0.0.0-20180821023952-922f4815f713/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f 
h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/api v0.0.0-20180818000503-e21acd801f91/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= +google.golang.org/grpc 
v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go index 9a638781cf..e1d1238d01 100644 --- a/vendor/go.opencensus.io/internal/internal.go +++ b/vendor/go.opencensus.io/internal/internal.go @@ -18,12 +18,12 @@ import ( "fmt" "time" - opencensus "go.opencensus.io" + "go.opencensus.io" ) // UserAgent is the user agent to be added to the outgoing // requests from the exporters. -var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) +var UserAgent = fmt.Sprintf("opencensus-go [%s]", opencensus.Version()) // MonotonicEndTime returns the end time at present // but offset from start, monotonically. diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go index 41b2c3fc03..3b1af8b4b8 100644 --- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go +++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go @@ -17,7 +17,6 @@ // used interally by the stats collector. package tagencoding // import "go.opencensus.io/internal/tagencoding" -// Values represent the encoded buffer for the values. type Values struct { Buffer []byte WriteIndex int @@ -32,7 +31,6 @@ func (vb *Values) growIfRequired(expected int) { } } -// WriteValue is the helper method to encode Values from map[Key][]byte. func (vb *Values) WriteValue(v []byte) { length := len(v) & 0xff vb.growIfRequired(1 + length) @@ -51,7 +49,7 @@ func (vb *Values) WriteValue(v []byte) { vb.WriteIndex += length } -// ReadValue is the helper method to decode Values to a map[Key][]byte. +// ReadValue is the helper method to read the values when decoding valuesBytes to a map[Key][]byte. func (vb *Values) ReadValue() []byte { // read length of v length := int(vb.Buffer[vb.ReadIndex]) @@ -69,7 +67,6 @@ func (vb *Values) ReadValue() []byte { return v } -// Bytes returns a reference to already written bytes in the Buffer. func (vb *Values) Bytes() []byte { return vb.Buffer[:vb.WriteIndex] } diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go index 073af7b473..553ca68dc4 100644 --- a/vendor/go.opencensus.io/internal/traceinternals.go +++ b/vendor/go.opencensus.io/internal/traceinternals.go @@ -22,7 +22,6 @@ import ( // TODO(#412): remove this var Trace interface{} -// LocalSpanStoreEnabled true if the local span store is enabled. var LocalSpanStoreEnabled bool // BucketConfiguration stores the number of samples to store for span buckets diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go deleted file mode 100644 index 12695ce2dc..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/exemplar.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -import ( - "time" -) - -// Exemplars keys. -const ( - AttachmentKeySpanContext = "SpanContext" -) - -// Exemplar is an example data point associated with each bucket of a -// distribution type aggregation. -// -// Their purpose is to provide an example of the kind of thing -// (request, RPC, trace span, etc.) that resulted in that measurement. -type Exemplar struct { - Value float64 // the value that was recorded - Timestamp time.Time // the time the value was recorded - Attachments Attachments // attachments (if any) -} - -// Attachments is a map of extra values associated with a recorded data point. -type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go deleted file mode 100644 index aadae41e6a..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/label.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -// LabelKey represents key of a label. It has optional -// description attribute. -type LabelKey struct { - Key string - Description string -} - -// LabelValue represents the value of a label. -// The zero value represents a missing label value, which may be treated -// differently to an empty string value by some back ends. -type LabelValue struct { - Value string // string value of the label - Present bool // flag that indicated whether a value is present or not -} - -// NewLabelValue creates a new non-nil LabelValue that represents the given string. -func NewLabelValue(val string) LabelValue { - return LabelValue{Value: val, Present: true} -} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go deleted file mode 100644 index 8293712c77..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/metric.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package metricdata - -import ( - "time" - - "go.opencensus.io/resource" -) - -// Descriptor holds metadata about a metric. -type Descriptor struct { - Name string // full name of the metric - Description string // human-readable description - Unit Unit // units for the measure - Type Type // type of measure - LabelKeys []LabelKey // label keys -} - -// Metric represents a quantity measured against a resource with different -// label value combinations. -type Metric struct { - Descriptor Descriptor // metric descriptor - Resource *resource.Resource // resource against which this was measured - TimeSeries []*TimeSeries // one time series for each combination of label values -} - -// TimeSeries is a sequence of points associated with a combination of label -// values. -type TimeSeries struct { - LabelValues []LabelValue // label values, same order as keys in the metric descriptor - Points []Point // points sequence - StartTime time.Time // time we started recording this time series -} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go deleted file mode 100644 index 7fe057b19c..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/point.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -import ( - "time" -) - -// Point is a single data point of a time series. -type Point struct { - // Time is the point in time that this point represents in a time series. - Time time.Time - // Value is the value of this point. Prefer using ReadValue to switching on - // the value type, since new value types might be added. - Value interface{} -} - -//go:generate stringer -type ValueType - -// NewFloat64Point creates a new Point holding a float64 value. -func NewFloat64Point(t time.Time, val float64) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewInt64Point creates a new Point holding an int64 value. -func NewInt64Point(t time.Time, val int64) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewDistributionPoint creates a new Point holding a Distribution value. -func NewDistributionPoint(t time.Time, val *Distribution) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewSummaryPoint creates a new Point holding a Summary value. -func NewSummaryPoint(t time.Time, val *Summary) Point { - return Point{ - Value: val, - Time: t, - } -} - -// ValueVisitor allows reading the value of a point. -type ValueVisitor interface { - VisitFloat64Value(float64) - VisitInt64Value(int64) - VisitDistributionValue(*Distribution) - VisitSummaryValue(*Summary) -} - -// ReadValue accepts a ValueVisitor and calls the appropriate method with the -// value of this point. -// Consumers of Point should use this in preference to switching on the type -// of the value directly, since new value types may be added. 
-func (p Point) ReadValue(vv ValueVisitor) { - switch v := p.Value.(type) { - case int64: - vv.VisitInt64Value(v) - case float64: - vv.VisitFloat64Value(v) - case *Distribution: - vv.VisitDistributionValue(v) - case *Summary: - vv.VisitSummaryValue(v) - default: - panic("unexpected value type") - } -} - -// Distribution contains summary statistics for a population of values. It -// optionally contains a histogram representing the distribution of those -// values across a set of buckets. -type Distribution struct { - // Count is the number of values in the population. Must be non-negative. This value - // must equal the sum of the values in bucket_counts if a histogram is - // provided. - Count int64 - // Sum is the sum of the values in the population. If count is zero then this field - // must be zero. - Sum float64 - // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the - // population. For values x_i this is: - // - // Sum[i=1..n]((x_i - mean)^2) - // - // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition - // describes Welford's method for accumulating this sum in one pass. - // - // If count is zero then this field must be zero. - SumOfSquaredDeviation float64 - // BucketOptions describes the bounds of the histogram buckets in this - // distribution. - // - // A Distribution may optionally contain a histogram of the values in the - // population. - // - // If nil, there is no associated histogram. - BucketOptions *BucketOptions - // Bucket If the distribution does not have a histogram, then omit this field. - // If there is a histogram, then the sum of the values in the Bucket counts - // must equal the value in the count field of the distribution. - Buckets []Bucket -} - -// BucketOptions describes the bounds of the histogram buckets in this -// distribution. -type BucketOptions struct { - // Bounds specifies a set of bucket upper bounds. - // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket - // index i are: - // - // [0, Bounds[i]) for i == 0 - // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 - // [Bounds[i-1], +infinity) for i == N-1 - Bounds []float64 -} - -// Bucket represents a single bucket (value range) in a distribution. -type Bucket struct { - // Count is the number of values in each bucket of the histogram, as described in - // bucket_bounds. - Count int64 - // Exemplar associated with this bucket (if any). - Exemplar *Exemplar -} - -// Summary is a representation of percentiles. -type Summary struct { - // Count is the cumulative count (if available). - Count int64 - // Sum is the cumulative sum of values (if available). - Sum float64 - // HasCountAndSum is true if Count and Sum are available. - HasCountAndSum bool - // Snapshot represents percentiles calculated over an arbitrary time window. - // The values in this struct can be reset at arbitrary unknown times, with - // the requirement that all of them are reset at the same time. - Snapshot Snapshot -} - -// Snapshot represents percentiles over an arbitrary time. -// The values in this struct can be reset at arbitrary unknown times, with -// the requirement that all of them are reset at the same time. -type Snapshot struct { - // Count is the number of values in the snapshot. Optional since some systems don't - // expose this. Set to 0 if not available. - Count int64 - // Sum is the sum of values in the snapshot. Optional since some systems don't - // expose this. If count is 0 then this field must be zero. 
- Sum float64 - // Percentiles is a map from percentile (range (0-100.0]) to the value of - // the percentile. - Percentiles map[float64]float64 -} - -//go:generate stringer -type Type - -// Type is the overall type of metric, including its value type and whether it -// represents a cumulative total (since the start time) or if it represents a -// gauge value. -type Type int - -// Metric types. -const ( - TypeGaugeInt64 Type = iota - TypeGaugeFloat64 - TypeGaugeDistribution - TypeCumulativeInt64 - TypeCumulativeFloat64 - TypeCumulativeDistribution - TypeSummary -) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go deleted file mode 100644 index c3f8ec27b5..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/type_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type Type"; DO NOT EDIT. - -package metricdata - -import "strconv" - -const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" - -var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} - -func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { - return "Type(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Type_name[_Type_index[i]:_Type_index[i+1]] -} diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go deleted file mode 100644 index ca1f390493..0000000000 --- a/vendor/go.opencensus.io/metric/metricproducer/manager.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricproducer - -import ( - "sync" -) - -// Manager maintains a list of active producers. Producers can register -// with the manager to allow readers to read all metrics provided by them. -// Readers can retrieve all producers registered with the manager, -// read metrics from the producers and export them. -type Manager struct { - mu sync.RWMutex - producers map[Producer]struct{} -} - -var prodMgr *Manager -var once sync.Once - -// GlobalManager is a single instance of producer manager -// that is used by all producers and all readers. -func GlobalManager() *Manager { - once.Do(func() { - prodMgr = &Manager{} - prodMgr.producers = make(map[Producer]struct{}) - }) - return prodMgr -} - -// AddProducer adds the producer to the Manager if it is not already present. -func (pm *Manager) AddProducer(producer Producer) { - if producer == nil { - return - } - pm.mu.Lock() - defer pm.mu.Unlock() - pm.producers[producer] = struct{}{} -} - -// DeleteProducer deletes the producer from the Manager if it is present. -func (pm *Manager) DeleteProducer(producer Producer) { - if producer == nil { - return - } - pm.mu.Lock() - defer pm.mu.Unlock() - delete(pm.producers, producer) -} - -// GetAll returns a slice of all producer currently registered with -// the Manager. 
For each call it generates a new slice. The slice -// should not be cached as registration may change at any time. It is -// typically called periodically by exporter to read metrics from -// the producers. -func (pm *Manager) GetAll() []Producer { - pm.mu.Lock() - defer pm.mu.Unlock() - producers := make([]Producer, len(pm.producers)) - i := 0 - for producer := range pm.producers { - producers[i] = producer - i++ - } - return producers -} diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go deleted file mode 100644 index 6cee9ed178..0000000000 --- a/vendor/go.opencensus.io/metric/metricproducer/producer.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricproducer - -import ( - "go.opencensus.io/metric/metricdata" -) - -// Producer is a source of metrics. -type Producer interface { - // Read should return the current values of all metrics supported by this - // metric provider. - // The returned metrics should be unique for each combination of name and - // resource. - Read() []*metricdata.Metric -} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index d2565f1e2b..62f03486a2 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.21.0" + return "0.18.0" } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go deleted file mode 100644 index a6c466ae82..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "go.opencensus.io/trace" - "golang.org/x/net/context" - - "google.golang.org/grpc/stats" -) - -// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and -// traces. Use with gRPC clients only. -type ClientHandler struct { - // StartOptions allows configuring the StartOptions used to create new spans. - // - // StartOptions.SpanKind will always be set to trace.SpanKindClient - // for spans started by this handler. - StartOptions trace.StartOptions -} - -// HandleConn exists to satisfy gRPC stats.Handler. 
-func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { - // no-op -} - -// TagConn exists to satisfy gRPC stats.Handler. -func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - // no-op - return ctx -} - -// HandleRPC implements per-RPC tracing and stats instrumentation. -func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - traceHandleRPC(ctx, rs) - statsHandleRPC(ctx, rs) -} - -// TagRPC implements per-RPC context management. -func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = c.traceTagRPC(ctx, rti) - ctx = c.statsTagRPC(ctx, rti) - return ctx -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go deleted file mode 100644 index abe978b67b..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// The following variables are measures are recorded by ClientHandler: -var ( - ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) - ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) - ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) - ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) -) - -// Predefined views may be registered to collect data for the above measures. -// As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are registered by -// default. 
-var ( - ClientSentBytesPerRPCView = &view.View{ - Measure: ClientSentBytesPerRPC, - Name: "grpc.io/client/sent_bytes_per_rpc", - Description: "Distribution of bytes sent per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultBytesDistribution, - } - - ClientReceivedBytesPerRPCView = &view.View{ - Measure: ClientReceivedBytesPerRPC, - Name: "grpc.io/client/received_bytes_per_rpc", - Description: "Distribution of bytes received per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultBytesDistribution, - } - - ClientRoundtripLatencyView = &view.View{ - Measure: ClientRoundtripLatency, - Name: "grpc.io/client/roundtrip_latency", - Description: "Distribution of round-trip latency, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMillisecondsDistribution, - } - - ClientCompletedRPCsView = &view.View{ - Measure: ClientRoundtripLatency, - Name: "grpc.io/client/completed_rpcs", - Description: "Count of RPCs by method and status.", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - Aggregation: view.Count(), - } - - ClientSentMessagesPerRPCView = &view.View{ - Measure: ClientSentMessagesPerRPC, - Name: "grpc.io/client/sent_messages_per_rpc", - Description: "Distribution of sent messages count per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMessageCountDistribution, - } - - ClientReceivedMessagesPerRPCView = &view.View{ - Measure: ClientReceivedMessagesPerRPC, - Name: "grpc.io/client/received_messages_per_rpc", - Description: "Distribution of received messages count per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMessageCountDistribution, - } - - ClientServerLatencyView = &view.View{ - Measure: ClientServerLatency, - Name: "grpc.io/client/server_latency", - Description: "Distribution of server latency as viewed by client, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMillisecondsDistribution, - } -) - -// DefaultClientViews are the default client views provided by this package. -var DefaultClientViews = []*view.View{ - ClientSentBytesPerRPCView, - ClientReceivedBytesPerRPCView, - ClientRoundtripLatencyView, - ClientCompletedRPCsView, -} - -// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. -// TODO(acetechnologist): This is temporary and will need to be replaced by a -// mechanism to load these defaults from a common repository/config shared by -// all supported languages. Likely a serialized protobuf of these defaults. diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go deleted file mode 100644 index 303c607f63..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package ocgrpc - -import ( - "time" - - "go.opencensus.io/tag" - "golang.org/x/net/context" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" -) - -// statsTagRPC gets the tag.Map populated by the application code, serializes -// its tags into the GRPC metadata in order to be sent to the server. -func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - startTime := time.Now() - if info == nil { - if grpclog.V(2) { - grpclog.Infof("clientHandler.TagRPC called with nil info.", info.FullMethodName) - } - return ctx - } - - d := &rpcData{ - startTime: startTime, - method: info.FullMethodName, - } - ts := tag.FromContext(ctx) - if ts != nil { - encoded := tag.Encode(ts) - ctx = stats.SetTags(ctx, encoded) - } - - return context.WithValue(ctx, rpcDataKey, d) -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go deleted file mode 100644 index 1370323fb7..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ocgrpc contains OpenCensus stats and trace -// integrations for gRPC. -// -// Use ServerHandler for servers and ClientHandler for clients. -package ocgrpc // import "go.opencensus.io/plugin/ocgrpc" diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go deleted file mode 100644 index b67b3e2be2..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "go.opencensus.io/trace" - "golang.org/x/net/context" - - "google.golang.org/grpc/stats" -) - -// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and -// traces. Use with gRPC servers. -// -// When installed (see Example), tracing metadata is read from inbound RPCs -// by default. If no tracing metadata is present, or if the tracing metadata is -// present but the SpanContext isn't sampled, then a new trace may be started -// (as determined by Sampler). -type ServerHandler struct { - // IsPublicEndpoint may be set to true to always start a new trace around - // each RPC. Any SpanContext in the RPC metadata will be added as a linked - // span instead of making it the parent of the span created around the - // server RPC. 
- // - // Be aware that if you leave this false (the default) on a public-facing - // server, callers will be able to send tracing metadata in gRPC headers - // and trigger traces in your backend. - IsPublicEndpoint bool - - // StartOptions to use for to spans started around RPCs handled by this server. - // - // These will apply even if there is tracing metadata already - // present on the inbound RPC but the SpanContext is not sampled. This - // ensures that each service has some opportunity to be traced. If you would - // like to not add any additional traces for this gRPC service, set: - // - // StartOptions.Sampler = trace.ProbabilitySampler(0.0) - // - // StartOptions.SpanKind will always be set to trace.SpanKindServer - // for spans started by this handler. - StartOptions trace.StartOptions -} - -var _ stats.Handler = (*ServerHandler)(nil) - -// HandleConn exists to satisfy gRPC stats.Handler. -func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { - // no-op -} - -// TagConn exists to satisfy gRPC stats.Handler. -func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - // no-op - return ctx -} - -// HandleRPC implements per-RPC tracing and stats instrumentation. -func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - traceHandleRPC(ctx, rs) - statsHandleRPC(ctx, rs) -} - -// TagRPC implements per-RPC context management. -func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = s.traceTagRPC(ctx, rti) - ctx = s.statsTagRPC(ctx, rti) - return ctx -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go deleted file mode 100644 index 609d9ed248..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// The following variables are measures are recorded by ServerHandler: -var ( - ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) - ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) - ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) - ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) - ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) -) - -// TODO(acetechnologist): This is temporary and will need to be replaced by a -// mechanism to load these defaults from a common repository/config shared by -// all supported languages. Likely a serialized protobuf of these defaults. - -// Predefined views may be registered to collect data for the above measures. -// As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are registered by -// default. -var ( - ServerReceivedBytesPerRPCView = &view.View{ - Name: "grpc.io/server/received_bytes_per_rpc", - Description: "Distribution of received bytes per RPC, by method.", - Measure: ServerReceivedBytesPerRPC, - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: DefaultBytesDistribution, - } - - ServerSentBytesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_bytes_per_rpc", - Description: "Distribution of total sent bytes per RPC, by method.", - Measure: ServerSentBytesPerRPC, - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: DefaultBytesDistribution, - } - - ServerLatencyView = &view.View{ - Name: "grpc.io/server/server_latency", - Description: "Distribution of server latency in milliseconds, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerLatency, - Aggregation: DefaultMillisecondsDistribution, - } - - ServerCompletedRPCsView = &view.View{ - Name: "grpc.io/server/completed_rpcs", - Description: "Count of RPCs by method and status.", - TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus}, - Measure: ServerLatency, - Aggregation: view.Count(), - } - - ServerReceivedMessagesPerRPCView = &view.View{ - Name: "grpc.io/server/received_messages_per_rpc", - Description: "Distribution of messages received count per RPC, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerReceivedMessagesPerRPC, - Aggregation: DefaultMessageCountDistribution, - } - - ServerSentMessagesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_messages_per_rpc", - Description: "Distribution of messages sent count per RPC, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerSentMessagesPerRPC, - Aggregation: DefaultMessageCountDistribution, - } -) - -// DefaultServerViews are the default server views provided by this package. -var DefaultServerViews = []*view.View{ - ServerReceivedBytesPerRPCView, - ServerSentBytesPerRPCView, - ServerLatencyView, - ServerCompletedRPCsView, -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go deleted file mode 100644 index 7847c1a912..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
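The server-side measures and DefaultServerViews shown above follow the same pattern: views must be registered, and ServerHandler must be attached to the gRPC server for any RPC to be recorded. A minimal sketch (not part of this change; the listen address and service registration are placeholders):

package main

import (
	"log"
	"net"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func main() {
	if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
		log.Fatalf("failed to register ocgrpc server views: %v", err)
	}

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	// ServerHandler records stats and starts server spans for every handled RPC.
	srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
	// ... register services on srv here ...
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}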
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "time" - - "golang.org/x/net/context" - - "go.opencensus.io/tag" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" -) - -// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from -// it and creates a new tag.Map and puts them into the returned context. -func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - startTime := time.Now() - if info == nil { - if grpclog.V(2) { - grpclog.Infof("opencensus: TagRPC called with nil info.") - } - return ctx - } - d := &rpcData{ - startTime: startTime, - method: info.FullMethodName, - } - propagated := h.extractPropagatedTags(ctx) - ctx = tag.NewContext(ctx, propagated) - ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName))) - return context.WithValue(ctx, rpcDataKey, d) -} - -// extractPropagatedTags creates a new tag map containing the tags extracted from the -// gRPC metadata. -func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map { - buf := stats.Tags(ctx) - if buf == nil { - return nil - } - propagated, err := tag.Decode(buf) - if err != nil { - if grpclog.V(2) { - grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err) - } - return nil - } - return propagated -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go deleted file mode 100644 index 0ae5691822..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "context" - "strconv" - "strings" - "sync/atomic" - "time" - - "go.opencensus.io/metric/metricdata" - ocstats "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -type grpcInstrumentationKey string - -// rpcData holds the instrumentation RPC data that is needed between the start -// and end of an call. It holds the info that this package needs to keep track -// of between the various GRPC events. -type rpcData struct { - // reqCount and respCount has to be the first words - // in order to be 64-aligned on 32-bit architectures. 
- sentCount, sentBytes, recvCount, recvBytes int64 // access atomically - - // startTime represents the time at which TagRPC was invoked at the - // beginning of an RPC. It is an appoximation of the time when the - // application code invoked GRPC code. - startTime time.Time - method string -} - -// The following variables define the default hard-coded auxiliary data used by -// both the default GRPC client and GRPC server metrics. -var ( - DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) - DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) -) - -// Server tags are applied to the context used to process each RPC, as well as -// the measures at the end of each RPC. -var ( - KeyServerMethod, _ = tag.NewKey("grpc_server_method") - KeyServerStatus, _ = tag.NewKey("grpc_server_status") -) - -// Client tags are applied to measures at the end of each RPC. -var ( - KeyClientMethod, _ = tag.NewKey("grpc_client_method") - KeyClientStatus, _ = tag.NewKey("grpc_client_status") -) - -var ( - rpcDataKey = grpcInstrumentationKey("opencensus-rpcData") -) - -func methodName(fullname string) string { - return strings.TrimLeft(fullname, "/") -} - -// statsHandleRPC processes the RPC events. -func statsHandleRPC(ctx context.Context, s stats.RPCStats) { - switch st := s.(type) { - case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: - // do nothing for client - case *stats.OutPayload: - handleRPCOutPayload(ctx, st) - case *stats.InPayload: - handleRPCInPayload(ctx, st) - case *stats.End: - handleRPCEnd(ctx, st) - default: - grpclog.Infof("unexpected stats: %T", st) - } -} - -func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - atomic.AddInt64(&d.sentBytes, int64(s.Length)) - atomic.AddInt64(&d.sentCount, 1) -} - -func handleRPCInPayload(ctx context.Context, s *stats.InPayload) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - atomic.AddInt64(&d.recvBytes, int64(s.Length)) - atomic.AddInt64(&d.recvCount, 1) -} - -func handleRPCEnd(ctx context.Context, s *stats.End) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - elapsedTime := time.Since(d.startTime) - - var st string - if s.Error != nil { - s, ok := status.FromError(s.Error) - if ok { - st = statusCodeToString(s) - } - } else { - st = "OK" - } - - latencyMillis := float64(elapsedTime) / float64(time.Millisecond) - attachments := getSpanCtxAttachment(ctx) - if s.Client { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags( - tag.Upsert(KeyClientMethod, methodName(d.method)), - tag.Upsert(KeyClientStatus, st)), - ocstats.WithAttachments(attachments), - ocstats.WithMeasurements( - ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - 
ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ClientRoundtripLatency.M(latencyMillis))) - } else { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags( - tag.Upsert(KeyServerStatus, st), - ), - ocstats.WithAttachments(attachments), - ocstats.WithMeasurements( - ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ServerLatency.M(latencyMillis))) - } -} - -func statusCodeToString(s *status.Status) string { - // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md - switch c := s.Code(); c { - case codes.OK: - return "OK" - case codes.Canceled: - return "CANCELLED" - case codes.Unknown: - return "UNKNOWN" - case codes.InvalidArgument: - return "INVALID_ARGUMENT" - case codes.DeadlineExceeded: - return "DEADLINE_EXCEEDED" - case codes.NotFound: - return "NOT_FOUND" - case codes.AlreadyExists: - return "ALREADY_EXISTS" - case codes.PermissionDenied: - return "PERMISSION_DENIED" - case codes.ResourceExhausted: - return "RESOURCE_EXHAUSTED" - case codes.FailedPrecondition: - return "FAILED_PRECONDITION" - case codes.Aborted: - return "ABORTED" - case codes.OutOfRange: - return "OUT_OF_RANGE" - case codes.Unimplemented: - return "UNIMPLEMENTED" - case codes.Internal: - return "INTERNAL" - case codes.Unavailable: - return "UNAVAILABLE" - case codes.DataLoss: - return "DATA_LOSS" - case codes.Unauthenticated: - return "UNAUTHENTICATED" - default: - return "CODE_" + strconv.FormatInt(int64(c), 10) - } -} - -func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments { - attachments := map[string]interface{}{} - span := trace.FromContext(ctx) - if span == nil { - return attachments - } - spanCtx := span.SpanContext() - if spanCtx.IsSampled() { - attachments[metricdata.AttachmentKeySpanContext] = spanCtx - } - return attachments -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go deleted file mode 100644 index 720f381c27..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "strings" - - "google.golang.org/grpc/codes" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" - "golang.org/x/net/context" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -const traceContextKey = "grpc-trace-bin" - -// TagRPC creates a new trace span for the client side of the RPC. -// -// It returns ctx with the new trace span added and a serialization of the -// SpanContext added to the outgoing gRPC metadata. 
-func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - name := strings.TrimPrefix(rti.FullMethodName, "/") - name = strings.Replace(name, "/", ".", -1) - ctx, span := trace.StartSpan(ctx, name, - trace.WithSampler(c.StartOptions.Sampler), - trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC - traceContextBinary := propagation.Binary(span.SpanContext()) - return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) -} - -// TagRPC creates a new trace span for the server side of the RPC. -// -// It checks the incoming gRPC metadata in ctx for a SpanContext, and if -// it finds one, uses that SpanContext as the parent context of the new span. -// -// It returns ctx, with the new trace span added. -func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - md, _ := metadata.FromIncomingContext(ctx) - name := strings.TrimPrefix(rti.FullMethodName, "/") - name = strings.Replace(name, "/", ".", -1) - traceContext := md[traceContextKey] - var ( - parent trace.SpanContext - haveParent bool - ) - if len(traceContext) > 0 { - // Metadata with keys ending in -bin are actually binary. They are base64 - // encoded before being put on the wire, see: - // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata - traceContextBinary := []byte(traceContext[0]) - parent, haveParent = propagation.FromBinary(traceContextBinary) - if haveParent && !s.IsPublicEndpoint { - ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithSampler(s.StartOptions.Sampler), - ) - return ctx - } - } - ctx, span := trace.StartSpan(ctx, name, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithSampler(s.StartOptions.Sampler)) - if haveParent { - span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) - } - return ctx -} - -func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { - span := trace.FromContext(ctx) - // TODO: compressed and uncompressed sizes are not populated in every message. - switch rs := rs.(type) { - case *stats.Begin: - span.AddAttributes( - trace.BoolAttribute("Client", rs.Client), - trace.BoolAttribute("FailFast", rs.FailFast)) - case *stats.InPayload: - span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) - case *stats.OutPayload: - span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) - case *stats.End: - if rs.Error != nil { - s, ok := status.FromError(rs.Error) - if ok { - span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) - } else { - span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) - } - } - span.End() - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go index 17142aabe0..066ebb87f8 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -34,8 +34,8 @@ type statsTransport struct { // RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. 
func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { ctx, _ := tag.New(req.Context(), - tag.Upsert(KeyClientHost, req.Host), - tag.Upsert(Host, req.Host), + tag.Upsert(KeyClientHost, req.URL.Host), + tag.Upsert(Host, req.URL.Host), tag.Upsert(KeyClientPath, req.URL.Path), tag.Upsert(Path, req.URL.Path), tag.Upsert(KeyClientMethod, req.Method), @@ -61,14 +61,11 @@ func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { track.end() } else { track.statusCode = resp.StatusCode - if req.Method != "HEAD" { - track.respContentLength = resp.ContentLength - } if resp.Body == nil { track.end() } else { track.body = resp.Body - resp.Body = wrappedBody(track, resp.Body) + resp.Body = track } } return resp, err @@ -85,14 +82,13 @@ func (t statsTransport) CancelRequest(req *http.Request) { } type tracker struct { - ctx context.Context - respSize int64 - respContentLength int64 - reqSize int64 - start time.Time - body io.ReadCloser - statusCode int - endOnce sync.Once + ctx context.Context + respSize int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once } var _ io.ReadCloser = (*tracker)(nil) @@ -100,13 +96,9 @@ var _ io.ReadCloser = (*tracker)(nil) func (t *tracker) end() { t.endOnce.Do(func() { latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) - respSize := t.respSize - if t.respSize == 0 && t.respContentLength > 0 { - respSize = t.respContentLength - } m := []stats.Measurement{ ClientSentBytes.M(t.reqSize), - ClientReceivedBytes.M(respSize), + ClientReceivedBytes.M(t.respSize), ClientRoundtripLatency.M(latencyMs), ClientLatency.M(latencyMs), ClientResponseBytes.M(t.respSize), @@ -124,9 +116,9 @@ func (t *tracker) end() { func (t *tracker) Read(b []byte) (int, error) { n, err := t.body.Read(b) - t.respSize += int64(n) switch err { case nil: + t.respSize += int64(n) return n, nil case io.EOF: t.end() diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go index 2f1c7f0063..f777772ec9 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -38,7 +38,7 @@ const ( // because there are additional fields not represented in the // OpenCensus span context. Spans created from the incoming // header will be the direct children of the client-side span. -// Similarly, receiver of the outgoing spans should use client-side +// Similarly, reciever of the outgoing spans should use client-side // span created by OpenCensus as the parent. type HTTPFormat struct{} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go index 5e6a343076..dbe22d5861 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/route.go +++ b/vendor/go.opencensus.io/plugin/ochttp/route.go @@ -15,21 +15,11 @@ package ochttp import ( - "context" "net/http" "go.opencensus.io/tag" ) -// SetRoute sets the http_server_route tag to the given value. -// It's useful when an HTTP framework does not support the http.Handler interface -// and using WithRouteTag is not an option, but provides a way to hook into the request flow. -func SetRoute(ctx context.Context, route string) { - if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { - a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) - } -} - // WithRouteTag returns an http.Handler that records stats with the // http_server_route tag set to the given value. 
func WithRouteTag(handler http.Handler, route string) http.Handler { diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index 4f6404fa79..ff72de97a8 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -118,18 +118,12 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ span.AddLink(trace.Link{ TraceID: sc.TraceID, SpanID: sc.SpanID, - Type: trace.LinkTypeParent, + Type: trace.LinkTypeChild, Attributes: nil, }) } } span.AddAttributes(requestAttrs(r)...) - if r.Body == nil { - // TODO: Handle cases where ContentLength is not set. - } else if r.ContentLength > 0 { - span.AddMessageReceiveEvent(0, /* TODO: messageID */ - int64(r.ContentLength), -1) - } return r.WithContext(ctx), span.End } @@ -142,7 +136,7 @@ func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { ctx, _ := tag.New(r.Context(), - tag.Upsert(Host, r.Host), + tag.Upsert(Host, r.URL.Host), tag.Upsert(Path, r.URL.Path), tag.Upsert(Method, r.Method)) track := &trackingResponseWriter{ @@ -207,9 +201,6 @@ func (t *trackingResponseWriter) Header() http.Header { func (t *trackingResponseWriter) Write(data []byte) (int, error) { n, err := t.writer.Write(data) t.respSize += int64(n) - // Add message event for request bytes sent. - span := trace.FromContext(t.ctx) - span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1) return n, err } diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go index 63bbcda5e3..46dcc8e57e 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -20,31 +20,19 @@ import ( "go.opencensus.io/tag" ) -// Deprecated: client HTTP measures. +// The following client HTTP measures are supported for use in custom views. var ( // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. - ClientRequestCount = stats.Int64( - "opencensus.io/http/client/request_count", - "Number of HTTP requests started", - stats.UnitDimensionless) + ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless) // Deprecated: Use ClientSentBytes. - ClientRequestBytes = stats.Int64( - "opencensus.io/http/client/request_bytes", - "HTTP request body size if set as ContentLength (uncompressed)", - stats.UnitBytes) + ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) // Deprecated: Use ClientReceivedBytes. - ClientResponseBytes = stats.Int64( - "opencensus.io/http/client/response_bytes", - "HTTP response body size (uncompressed)", - stats.UnitBytes) + ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) // Deprecated: Use ClientRoundtripLatency. - ClientLatency = stats.Float64( - "opencensus.io/http/client/latency", - "End-to-end latency", - stats.UnitMilliseconds) + ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) ) -// The following client HTTP measures are supported for use in custom views. +// Client measures supported for use in custom views. 
var ( ClientSentBytes = stats.Int64( "opencensus.io/http/client/sent_bytes", @@ -65,22 +53,10 @@ var ( // The following server HTTP measures are supported for use in custom views: var ( - ServerRequestCount = stats.Int64( - "opencensus.io/http/server/request_count", - "Number of HTTP requests started", - stats.UnitDimensionless) - ServerRequestBytes = stats.Int64( - "opencensus.io/http/server/request_bytes", - "HTTP request body size if set as ContentLength (uncompressed)", - stats.UnitBytes) - ServerResponseBytes = stats.Int64( - "opencensus.io/http/server/response_bytes", - "HTTP response body size (uncompressed)", - stats.UnitBytes) - ServerLatency = stats.Float64( - "opencensus.io/http/server/latency", - "End-to-end latency", - stats.UnitMilliseconds) + ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless) + ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) + ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) + ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds) ) // The following tags are applied to stats recorded by this package. Host, Path @@ -128,11 +104,11 @@ var ( // Default distributions used by views in this package. var ( - DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) ) -// Package ochttp provides some convenience views for client measures. +// Package ochttp provides some convenience views. // You still need to register these views for data to actually be collected. var ( ClientSentBytesDistribution = &view.View{ @@ -168,7 +144,6 @@ var ( } ) -// Deprecated: Old client Views. var ( // Deprecated: No direct replacement, but see ClientCompletedCount. ClientRequestCountView = &view.View{ @@ -186,7 +161,7 @@ var ( Aggregation: DefaultSizeDistribution, } - // Deprecated: Use ClientReceivedBytesDistribution instead. + // Deprecated: Use ClientReceivedBytesDistribution. ClientResponseBytesView = &view.View{ Name: "opencensus.io/http/client/response_bytes", Description: "Size distribution of HTTP response body", @@ -194,7 +169,7 @@ var ( Aggregation: DefaultSizeDistribution, } - // Deprecated: Use ClientRoundtripLatencyDistribution instead. + // Deprecated: Use ClientRoundtripLatencyDistribution. ClientLatencyView = &view.View{ Name: "opencensus.io/http/client/latency", Description: "Latency distribution of HTTP requests", @@ -202,7 +177,7 @@ var ( Aggregation: DefaultLatencyDistribution, } - // Deprecated: Use ClientCompletedCount instead. + // Deprecated: Use ClientCompletedCount. 
ClientRequestCountByMethod = &view.View{ Name: "opencensus.io/http/client/request_count_by_method", Description: "Client request count by HTTP method", @@ -211,7 +186,7 @@ var ( Aggregation: view.Count(), } - // Deprecated: Use ClientCompletedCount instead. + // Deprecated: Use ClientCompletedCount. ClientResponseCountByStatusCode = &view.View{ Name: "opencensus.io/http/client/response_count_by_status_code", Description: "Client response count by status code", @@ -221,8 +196,6 @@ var ( } ) -// Package ochttp provides some convenience views for server measures. -// You still need to register these views for data to actually be collected. var ( ServerRequestCountView = &view.View{ Name: "opencensus.io/http/server/request_count", diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go index c23b97fb1f..819a2d5ff9 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -34,7 +34,6 @@ const ( HostAttribute = "http.host" MethodAttribute = "http.method" PathAttribute = "http.path" - URLAttribute = "http.url" UserAgentAttribute = "http.user_agent" StatusCodeAttribute = "http.status_code" ) @@ -94,8 +93,7 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { // span.End() will be invoked after // a read from resp.Body returns io.EOF or when // resp.Body.Close() is invoked. - bt := &bodyTracker{rc: resp.Body, span: span} - resp.Body = wrappedBody(bt, resp.Body) + resp.Body = &bodyTracker{rc: resp.Body, span: span} return resp, err } @@ -151,21 +149,12 @@ func spanNameFromURL(req *http.Request) string { } func requestAttrs(r *http.Request) []trace.Attribute { - userAgent := r.UserAgent() - - attrs := make([]trace.Attribute, 0, 5) - attrs = append(attrs, + return []trace.Attribute{ trace.StringAttribute(PathAttribute, r.URL.Path), - trace.StringAttribute(URLAttribute, r.URL.String()), - trace.StringAttribute(HostAttribute, r.Host), + trace.StringAttribute(HostAttribute, r.URL.Host), trace.StringAttribute(MethodAttribute, r.Method), - ) - - if userAgent != "" { - attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) + trace.StringAttribute(UserAgentAttribute, r.UserAgent()), } - - return attrs } func responseAttrs(resp *http.Response) []trace.Attribute { diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go deleted file mode 100644 index 7d75cae2b1..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "io" -) - -// wrappedBody returns a wrapped version of the original -// Body and only implements the same combination of additional -// interfaces as the original. 
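For context on the ochttp client and server changes in this part of the diff, here is a minimal usage sketch (not part of the change). The route, port, and the DefaultClientViews/DefaultServerViews view sets are assumed from the vendored ochttp version:

package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

// newInstrumentedHandler wraps a mux with ochttp.Handler so server stats and
// spans are recorded, and tags the /healthz route via WithRouteTag.
func newInstrumentedHandler() http.Handler {
	mux := http.NewServeMux()
	mux.Handle("/healthz", ochttp.WithRouteTag(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}), "/healthz"))
	return &ochttp.Handler{Handler: mux}
}

// newInstrumentedClient returns an http.Client whose transport records the
// client measures declared in stats.go.
func newInstrumentedClient() *http.Client {
	return &http.Client{Transport: &ochttp.Transport{}}
}

func main() {
	// Views must be registered explicitly; declaring them collects nothing.
	if err := view.Register(ochttp.DefaultServerViews...); err != nil {
		log.Fatal(err)
	}
	if err := view.Register(ochttp.DefaultClientViews...); err != nil {
		log.Fatal(err)
	}
	_ = newInstrumentedClient() // use for outbound calls
	log.Fatal(http.ListenAndServe(":8080", newInstrumentedHandler()))
}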
-func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { - var ( - wr, i0 = body.(io.Writer) - ) - switch { - case !i0: - return struct { - io.ReadCloser - }{wrapper} - - case i0: - return struct { - io.ReadCloser - io.Writer - }{wrapper, wr} - default: - return struct { - io.ReadCloser - }{wrapper} - } -} diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go deleted file mode 100644 index b1764e1d3b..0000000000 --- a/vendor/go.opencensus.io/resource/resource.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package resource provides functionality for resource, which capture -// identifying information about the entities for which signals are exported. -package resource - -import ( - "context" - "fmt" - "os" - "regexp" - "sort" - "strconv" - "strings" -) - -// Environment variables used by FromEnv to decode a resource. -const ( - EnvVarType = "OC_RESOURCE_TYPE" - EnvVarLabels = "OC_RESOURCE_LABELS" -) - -// Resource describes an entity about which identifying information and metadata is exposed. -// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. -type Resource struct { - Type string - Labels map[string]string -} - -// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. -func EncodeLabels(labels map[string]string) string { - sortedKeys := make([]string, 0, len(labels)) - for k := range labels { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - - s := "" - for i, k := range sortedKeys { - if i > 0 { - s += "," - } - s += k + "=" + strconv.Quote(labels[k]) - } - return s -} - -var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`) - -// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable. -// A list of labels of the form `="",="",...` is accepted. -// Domain names and paths are accepted as label keys. -// Most users will want to use FromEnv instead. -func DecodeLabels(s string) (map[string]string, error) { - m := map[string]string{} - // Ensure a trailing comma, which allows us to keep the regex simpler - s = strings.TrimRight(strings.TrimSpace(s), ",") + "," - - for len(s) > 0 { - match := labelRegex.FindStringSubmatch(s) - if len(match) == 0 { - return nil, fmt.Errorf("invalid label formatting, remainder: %s", s) - } - v := match[2] - if v == "" { - v = match[3] - } else { - var err error - if v, err = strconv.Unquote(v); err != nil { - return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err) - } - } - m[match[1]] = v - - s = s[len(match[0]):] - } - return m, nil -} - -// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE -// and OC_RESOURCE_labelS environment variables. 
-func FromEnv(context.Context) (*Resource, error) { - res := &Resource{ - Type: strings.TrimSpace(os.Getenv(EnvVarType)), - } - labels := strings.TrimSpace(os.Getenv(EnvVarLabels)) - if labels == "" { - return res, nil - } - var err error - if res.Labels, err = DecodeLabels(labels); err != nil { - return nil, err - } - return res, nil -} - -var _ Detector = FromEnv - -// merge resource information from b into a. In case of a collision, a takes precedence. -func merge(a, b *Resource) *Resource { - if a == nil { - return b - } - if b == nil { - return a - } - res := &Resource{ - Type: a.Type, - Labels: map[string]string{}, - } - if res.Type == "" { - res.Type = b.Type - } - for k, v := range b.Labels { - res.Labels[k] = v - } - // Labels from resource a overwrite labels from resource b. - for k, v := range a.Labels { - res.Labels[k] = v - } - return res -} - -// Detector attempts to detect resource information. -// If the detector cannot find resource information, the returned resource is nil but no -// error is returned. -// An error is only returned on unexpected failures. -type Detector func(context.Context) (*Resource, error) - -// MultiDetector returns a Detector that calls all input detectors in order and -// merges each result with the previous one. In case a type of label key is already set, -// the first set value is takes precedence. -// It returns on the first error that a sub-detector encounters. -func MultiDetector(detectors ...Detector) Detector { - return func(ctx context.Context) (*Resource, error) { - return detectAll(ctx, detectors...) - } -} - -// detectall calls all input detectors sequentially an merges each result with the previous one. -// It returns on the first error that a sub-detector encounters. -func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { - var res *Resource - for _, d := range detectors { - r, err := d(ctx) - if err != nil { - return nil, err - } - res = merge(res, r) - } - return res, nil -} diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 36935e629b..ed54552054 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -19,7 +19,7 @@ import ( ) // DefaultRecorder will be called for each Record call. -var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]string) // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/stats/internal/validation.go similarity index 72% rename from vendor/go.opencensus.io/metric/metricdata/doc.go rename to vendor/go.opencensus.io/stats/internal/validation.go index 52a7b3bf85..b946667f96 100644 --- a/vendor/go.opencensus.io/metric/metricdata/doc.go +++ b/vendor/go.opencensus.io/stats/internal/validation.go @@ -12,8 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package metricdata contains the metrics data model. -// -// This is an EXPERIMENTAL package, and may change in arbitrary ways without -// notice. 
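The resource package being dropped from vendor above reads two environment variables, with label values double-quoted and pairs comma-separated. A small sketch of that format, assuming an OpenCensus version that still ships the package; the type and label values below are placeholders:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"go.opencensus.io/resource"
)

func main() {
	os.Setenv("OC_RESOURCE_TYPE", "k8s.io/container")
	// Each value must be quoted; pairs are separated by commas.
	os.Setenv("OC_RESOURCE_LABELS", `k8s.io/namespace="default",k8s.io/pod="example-pod"`)

	res, err := resource.FromEnv(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Type, res.Labels)
}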
-package metricdata // import "go.opencensus.io/metric/metricdata" +package internal // import "go.opencensus.io/stats/internal" + +const ( + MaxNameLength = 255 +) + +func IsPrintable(str string) bool { + for _, r := range str { + if !(r >= ' ' && r <= '~') { + return false + } + } + return true +} diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go index 1ffd3cefc7..64d02b1961 100644 --- a/vendor/go.opencensus.io/stats/measure.go +++ b/vendor/go.opencensus.io/stats/measure.go @@ -68,6 +68,21 @@ func (m *measureDescriptor) subscribed() bool { return atomic.LoadInt32(&m.subs) == 1 } +// Name returns the name of the measure. +func (m *measureDescriptor) Name() string { + return m.name +} + +// Description returns the description of the measure. +func (m *measureDescriptor) Description() string { + return m.description +} + +// Unit returns the unit of the measure. +func (m *measureDescriptor) Unit() string { + return m.unit +} + var ( mu sync.RWMutex measures = make(map[string]*measureDescriptor) @@ -93,9 +108,8 @@ func registerMeasureHandle(name, desc, unit string) *measureDescriptor { // provides methods to create measurements of their kind. For example, Int64Measure // provides M to convert an int64 into a measurement. type Measurement struct { - v float64 - m Measure - desc *measureDescriptor + v float64 + m *measureDescriptor } // Value returns the value of the Measurement as a float64. diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go index f02c1eda84..acedb21c44 100644 --- a/vendor/go.opencensus.io/stats/measure_float64.go +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -17,17 +17,13 @@ package stats // Float64Measure is a measure for float64 values. type Float64Measure struct { - desc *measureDescriptor + *measureDescriptor } // M creates a new float64 measurement. // Use Record to record measurements. func (m *Float64Measure) M(v float64) Measurement { - return Measurement{ - m: m, - desc: m.desc, - v: v, - } + return Measurement{m: m.measureDescriptor, v: v} } // Float64 creates a new measure for float64 values. @@ -38,18 +34,3 @@ func Float64(name, description, unit string) *Float64Measure { mi := registerMeasureHandle(name, description, unit) return &Float64Measure{mi} } - -// Name returns the name of the measure. -func (m *Float64Measure) Name() string { - return m.desc.name -} - -// Description returns the description of the measure. -func (m *Float64Measure) Description() string { - return m.desc.description -} - -// Unit returns the unit of the measure. -func (m *Float64Measure) Unit() string { - return m.desc.unit -} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go index d101d79735..c4243ba749 100644 --- a/vendor/go.opencensus.io/stats/measure_int64.go +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -17,17 +17,13 @@ package stats // Int64Measure is a measure for int64 values. type Int64Measure struct { - desc *measureDescriptor + *measureDescriptor } // M creates a new int64 measurement. // Use Record to record measurements. func (m *Int64Measure) M(v int64) Measurement { - return Measurement{ - m: m, - desc: m.desc, - v: float64(v), - } + return Measurement{m: m.measureDescriptor, v: float64(v)} } // Int64 creates a new measure for int64 values. 
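The measure changes above move Name/Description/Unit onto measureDescriptor, but the public constructors and recording path stay the same for callers. A minimal sketch of how application code uses them (measure name, view bounds, and tag key are placeholders; not part of this change):

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

var (
	keyMethod, _ = tag.NewKey("method")
	mLatency     = stats.Float64("example.com/request_latency", "Request latency", stats.UnitMilliseconds)
)

func main() {
	// A view must be registered before recorded data is kept anywhere.
	v := &view.View{
		Name:        "example.com/request_latency",
		Description: "Latency distribution of requests",
		Measure:     mLatency,
		TagKeys:     []tag.Key{keyMethod},
		Aggregation: view.Distribution(1, 5, 10, 50, 100, 500, 1000),
	}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}

	ctx, _ := tag.New(context.Background(), tag.Upsert(keyMethod, "GET"))
	// Measurement values are carried as float64 internally, as measure.go shows.
	stats.Record(ctx, mLatency.M(12.3))
}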
@@ -38,18 +34,3 @@ func Int64(name, description, unit string) *Int64Measure { mi := registerMeasureHandle(name, description, unit) return &Int64Measure{mi} } - -// Name returns the name of the measure. -func (m *Int64Measure) Name() string { - return m.desc.name -} - -// Description returns the description of the measure. -func (m *Int64Measure) Description() string { - return m.desc.description -} - -// Unit returns the unit of the measure. -func (m *Int64Measure) Unit() string { - return m.desc.unit -} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index ad4691184d..0aced02c30 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -18,7 +18,7 @@ package stats import ( "context" - "go.opencensus.io/metric/metricdata" + "go.opencensus.io/exemplar" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) @@ -31,87 +31,39 @@ func init() { } } -type recordOptions struct { - attachments metricdata.Attachments - mutators []tag.Mutator - measurements []Measurement -} - -// WithAttachments applies provided exemplar attachments. -func WithAttachments(attachments metricdata.Attachments) Options { - return func(ro *recordOptions) { - ro.attachments = attachments - } -} - -// WithTags applies provided tag mutators. -func WithTags(mutators ...tag.Mutator) Options { - return func(ro *recordOptions) { - ro.mutators = mutators - } -} - -// WithMeasurements applies provided measurements. -func WithMeasurements(measurements ...Measurement) Options { - return func(ro *recordOptions) { - ro.measurements = measurements - } -} - -// Options apply changes to recordOptions. -type Options func(*recordOptions) - -func createRecordOption(ros ...Options) *recordOptions { - o := &recordOptions{} - for _, ro := range ros { - ro(o) - } - return o -} - // Record records one or multiple measurements with the same context at once. // If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { - RecordWithOptions(ctx, WithMeasurements(ms...)) -} - -// RecordWithTags records one or multiple measurements at once. -// -// Measurements will be tagged with the tags in the context mutated by the mutators. -// RecordWithTags is useful if you want to record with tag mutations but don't want -// to propagate the mutations in the context. -func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { - return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) -} - -// RecordWithOptions records measurements from the given options (if any) against context -// and tags and attachments in the options (if any). -// If there are any tags in the context, measurements will be tagged with them. -func RecordWithOptions(ctx context.Context, ros ...Options) error { - o := createRecordOption(ros...) - if len(o.measurements) == 0 { - return nil - } recorder := internal.DefaultRecorder if recorder == nil { - return nil + return + } + if len(ms) == 0 { + return } record := false - for _, m := range o.measurements { - if m.desc.subscribed() { + for _, m := range ms { + if m.m.subscribed() { record = true break } } if !record { - return nil + return } - if len(o.mutators) > 0 { - var err error - if ctx, err = tag.New(ctx, o.mutators...); err != nil { - return err - } + recorder(tag.FromContext(ctx), ms, exemplar.AttachmentsFromContext(ctx)) +} + +// RecordWithTags records one or multiple measurements at once. 
+// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. +func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + ctx, err := tag.New(ctx, mutators...) + if err != nil { + return err } - recorder(tag.FromContext(ctx), o.measurements, o.attachments) + Record(ctx, ms...) return nil } diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go index d500e67f73..960b94601f 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -17,9 +17,8 @@ package view import ( "math" - "time" - "go.opencensus.io/metric/metricdata" + "go.opencensus.io/exemplar" ) // AggregationData represents an aggregated value from a collection. @@ -27,10 +26,9 @@ import ( // Mosts users won't directly access aggregration data. type AggregationData interface { isAggregationData() bool - addSample(v float64, attachments map[string]interface{}, t time.Time) + addSample(e *exemplar.Exemplar) clone() AggregationData equal(other AggregationData) bool - toPoint(t metricdata.Type, time time.Time) metricdata.Point } const epsilon = 1e-9 @@ -45,7 +43,7 @@ type CountData struct { func (a *CountData) isAggregationData() bool { return true } -func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { +func (a *CountData) addSample(_ *exemplar.Exemplar) { a.Value = a.Value + 1 } @@ -62,15 +60,6 @@ func (a *CountData) equal(other AggregationData) bool { return a.Value == a2.Value } -func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeInt64: - return metricdata.NewInt64Point(t, a.Value) - default: - panic("unsupported metricdata.Type") - } -} - // SumData is the aggregated data for the Sum aggregation. // A sum aggregation processes data and sums up the recordings. // @@ -81,8 +70,8 @@ type SumData struct { func (a *SumData) isAggregationData() bool { return true } -func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { - a.Value += v +func (a *SumData) addSample(e *exemplar.Exemplar) { + a.Value += e.Value } func (a *SumData) clone() AggregationData { @@ -97,17 +86,6 @@ func (a *SumData) equal(other AggregationData) bool { return math.Pow(a.Value-a2.Value, 2) < epsilon } -func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeInt64: - return metricdata.NewInt64Point(t, int64(a.Value)) - case metricdata.TypeCumulativeFloat64: - return metricdata.NewFloat64Point(t, a.Value) - default: - panic("unsupported metricdata.Type") - } -} - // DistributionData is the aggregated data for the // Distribution aggregation. // @@ -124,7 +102,7 @@ type DistributionData struct { CountPerBucket []int64 // number of occurrences per bucket // ExemplarsPerBucket is slice the same length as CountPerBucket containing // an exemplar for the associated bucket, or nil. 
- ExemplarsPerBucket []*metricdata.Exemplar + ExemplarsPerBucket []*exemplar.Exemplar bounds []float64 // histogram distribution of the values } @@ -132,7 +110,7 @@ func newDistributionData(bounds []float64) *DistributionData { bucketCount := len(bounds) + 1 return &DistributionData{ CountPerBucket: make([]int64, bucketCount), - ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), + ExemplarsPerBucket: make([]*exemplar.Exemplar, bucketCount), bounds: bounds, Min: math.MaxFloat64, Max: math.SmallestNonzeroFloat64, @@ -151,62 +129,64 @@ func (a *DistributionData) variance() float64 { func (a *DistributionData) isAggregationData() bool { return true } -// TODO(songy23): support exemplar attachments. -func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { - if v < a.Min { - a.Min = v +func (a *DistributionData) addSample(e *exemplar.Exemplar) { + f := e.Value + if f < a.Min { + a.Min = f } - if v > a.Max { - a.Max = v + if f > a.Max { + a.Max = f } a.Count++ - a.addToBucket(v, attachments, t) + a.addToBucket(e) if a.Count == 1 { - a.Mean = v + a.Mean = f return } oldMean := a.Mean - a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) - a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) + a.Mean = a.Mean + (f-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean) } -func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { +func (a *DistributionData) addToBucket(e *exemplar.Exemplar) { var count *int64 - var i int - var b float64 - for i, b = range a.bounds { - if v < b { + var ex **exemplar.Exemplar + for i, b := range a.bounds { + if e.Value < b { count = &a.CountPerBucket[i] + ex = &a.ExemplarsPerBucket[i] break } } - if count == nil { // Last bucket. - i = len(a.bounds) - count = &a.CountPerBucket[i] + if count == nil { + count = &a.CountPerBucket[len(a.bounds)] + ex = &a.ExemplarsPerBucket[len(a.bounds)] } *count++ - if exemplar := getExemplar(v, attachments, t); exemplar != nil { - a.ExemplarsPerBucket[i] = exemplar - } + *ex = maybeRetainExemplar(*ex, e) } -func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { - if len(attachments) == 0 { - return nil +func maybeRetainExemplar(old, cur *exemplar.Exemplar) *exemplar.Exemplar { + if old == nil { + return cur } - return &metricdata.Exemplar{ - Value: v, - Timestamp: t, - Attachments: attachments, + + // Heuristic to pick the "better" exemplar: first keep the one with a + // sampled trace attachment, if neither have a trace attachment, pick the + // one with more attachments. + _, haveTraceID := cur.Attachments[exemplar.KeyTraceID] + if haveTraceID || len(cur.Attachments) >= len(old.Attachments) { + return cur } + return old } func (a *DistributionData) clone() AggregationData { c := *a c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) - c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) + c.ExemplarsPerBucket = append([]*exemplar.Exemplar(nil), a.ExemplarsPerBucket...) 
return &c } @@ -229,33 +209,6 @@ func (a *DistributionData) equal(other AggregationData) bool { return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon } -func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeDistribution: - buckets := []metricdata.Bucket{} - for i := 0; i < len(a.CountPerBucket); i++ { - buckets = append(buckets, metricdata.Bucket{ - Count: a.CountPerBucket[i], - Exemplar: a.ExemplarsPerBucket[i], - }) - } - bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} - - val := &metricdata.Distribution{ - Count: a.Count, - Sum: a.Sum(), - SumOfSquaredDeviation: a.SumOfSquaredDev, - BucketOptions: bucketOptions, - Buckets: buckets, - } - return metricdata.NewDistributionPoint(t, val) - - default: - // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. - panic("unsupported metricdata.Type") - } -} - // LastValueData returns the last value recorded for LastValue aggregation. type LastValueData struct { Value float64 @@ -265,8 +218,8 @@ func (l *LastValueData) isAggregationData() bool { return true } -func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { - l.Value = v +func (l *LastValueData) addSample(e *exemplar.Exemplar) { + l.Value = e.Value } func (l *LastValueData) clone() AggregationData { @@ -280,14 +233,3 @@ func (l *LastValueData) equal(other AggregationData) bool { } return l.Value == a2.Value } - -func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeGaugeInt64: - return metricdata.NewInt64Point(t, int64(l.Value)) - case metricdata.TypeGaugeFloat64: - return metricdata.NewFloat64Point(t, l.Value) - default: - panic("unsupported metricdata.Type") - } -} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index 8a6a2c0fdc..32415d4859 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -17,7 +17,8 @@ package view import ( "sort" - "time" + + "go.opencensus.io/exemplar" "go.opencensus.io/internal/tagencoding" "go.opencensus.io/tag" @@ -32,13 +33,13 @@ type collector struct { a *Aggregation } -func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { +func (c *collector) addSample(s string, e *exemplar.Exemplar) { aggregator, ok := c.signatures[s] if !ok { aggregator = c.a.newData() c.signatures[s] = aggregator } - aggregator.addSample(v, attachments, t) + aggregator.addSample(e) } // collectRows returns a snapshot of the collected Row values. diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go index 37f88e1d9f..c2a08af678 100644 --- a/vendor/go.opencensus.io/stats/view/view.go +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -17,15 +17,16 @@ package view import ( "bytes" - "errors" "fmt" "reflect" "sort" "sync/atomic" "time" - "go.opencensus.io/metric/metricdata" + "go.opencensus.io/exemplar" + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) @@ -68,11 +69,6 @@ func (v *View) same(other *View) bool { v.Measure.Name() == other.Measure.Name() } -// ErrNegativeBucketBounds error returned if histogram contains negative bounds. -// -// Deprecated: this should not be public. 
-var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") - // canonicalize canonicalizes v by setting explicit // defaults for Name and Description and sorting the TagKeys func (v *View) canonicalize() error { @@ -94,40 +90,20 @@ func (v *View) canonicalize() error { sort.Slice(v.TagKeys, func(i, j int) bool { return v.TagKeys[i].Name() < v.TagKeys[j].Name() }) - sort.Float64s(v.Aggregation.Buckets) - for _, b := range v.Aggregation.Buckets { - if b < 0 { - return ErrNegativeBucketBounds - } - } - // drop 0 bucket silently. - v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) - return nil } -func dropZeroBounds(bounds ...float64) []float64 { - for i, bound := range bounds { - if bound > 0 { - return bounds[i:] - } - } - return []float64{} -} - // viewInternal is the internal representation of a View. type viewInternal struct { - view *View // view is the canonicalized View definition associated with this view. - subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access - collector *collector - metricDescriptor *metricdata.Descriptor + view *View // view is the canonicalized View definition associated with this view. + subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector } func newViewInternal(v *View) (*viewInternal, error) { return &viewInternal{ - view: v, - collector: &collector{make(map[string]AggregationData), v.Aggregation}, - metricDescriptor: viewToMetricDescriptor(v), + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, }, nil } @@ -153,12 +129,12 @@ func (v *viewInternal) collectedRows() []*Row { return v.collector.collectedRows(v.view.TagKeys) } -func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { +func (v *viewInternal) addSample(m *tag.Map, e *exemplar.Exemplar) { if !v.isSubscribed() { return } sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, val, attachments, t) + v.collector.addSample(sig, e) } // A Data is a set of rows about usage of the single measure associated @@ -198,23 +174,11 @@ func (r *Row) Equal(other *Row) bool { return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) } -const maxNameLength = 255 - -// Returns true if the given string contains only printable characters. -func isPrintable(str string) bool { - for _, r := range str { - if !(r >= ' ' && r <= '~') { - return false - } - } - return true -} - func checkViewName(name string) error { - if len(name) > maxNameLength { - return fmt.Errorf("view name cannot be larger than %v", maxNameLength) + if len(name) > internal.MaxNameLength { + return fmt.Errorf("view name cannot be larger than %v", internal.MaxNameLength) } - if !isPrintable(name) { + if !internal.IsPrintable(name) { return fmt.Errorf("view name needs to be an ASCII string") } return nil diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go deleted file mode 100644 index 010f81bab5..0000000000 --- a/vendor/go.opencensus.io/stats/view/view_to_metric.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "time" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats" -) - -func getUnit(unit string) metricdata.Unit { - switch unit { - case "1": - return metricdata.UnitDimensionless - case "ms": - return metricdata.UnitMilliseconds - case "By": - return metricdata.UnitBytes - } - return metricdata.UnitDimensionless -} - -func getType(v *View) metricdata.Type { - m := v.Measure - agg := v.Aggregation - - switch agg.Type { - case AggTypeSum: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeCumulativeInt64 - case *stats.Float64Measure: - return metricdata.TypeCumulativeFloat64 - default: - panic("unexpected measure type") - } - case AggTypeDistribution: - return metricdata.TypeCumulativeDistribution - case AggTypeLastValue: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeGaugeInt64 - case *stats.Float64Measure: - return metricdata.TypeGaugeFloat64 - default: - panic("unexpected measure type") - } - case AggTypeCount: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeCumulativeInt64 - case *stats.Float64Measure: - return metricdata.TypeCumulativeInt64 - default: - panic("unexpected measure type") - } - default: - panic("unexpected aggregation type") - } -} - -func getLableKeys(v *View) []metricdata.LabelKey { - labelKeys := []metricdata.LabelKey{} - for _, k := range v.TagKeys { - labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) - } - return labelKeys -} - -func viewToMetricDescriptor(v *View) *metricdata.Descriptor { - return &metricdata.Descriptor{ - Name: v.Name, - Description: v.Description, - Unit: getUnit(v.Measure.Unit()), - Type: getType(v), - LabelKeys: getLableKeys(v), - } -} - -func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { - labelValues := []metricdata.LabelValue{} - tagMap := make(map[string]string) - for _, tag := range row.Tags { - tagMap[tag.Key.Name()] = tag.Value - } - - for _, key := range expectedKeys { - if val, ok := tagMap[key.Key]; ok { - labelValues = append(labelValues, metricdata.NewLabelValue(val)) - } else { - labelValues = append(labelValues, metricdata.LabelValue{}) - } - } - return labelValues -} - -func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { - return &metricdata.TimeSeries{ - Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, - LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), - StartTime: startTime, - } -} - -func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { - if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || - v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { - startTime = time.Time{} - } - - rows := v.collectedRows() - if len(rows) == 0 { - return nil - } - - ts := []*metricdata.TimeSeries{} - for _, row := range rows { - ts = append(ts, rowToTimeseries(v, row, now, startTime)) - } - - m := &metricdata.Metric{ - Descriptor: *v.metricDescriptor, - TimeSeries: ts, - } - return m -} diff --git 
a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 2f3c018af0..63b0ee3cc3 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -17,11 +17,8 @@ package view import ( "fmt" - "sync" "time" - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" @@ -46,7 +43,6 @@ type worker struct { timer *time.Ticker c chan command quit, done chan bool - mu sync.RWMutex } var defaultWorker *worker @@ -68,6 +64,11 @@ func Find(name string) (v *View) { // Register begins collecting data for the given views. // Once a view is registered, it reports data to the registered exporters. func Register(views ...*View) error { + for _, v := range views { + if err := v.canonicalize(); err != nil { + return err + } + } req := ®isterViewReq{ views: views, err: make(chan error), @@ -106,7 +107,7 @@ func RetrieveData(viewName string) ([]*Row, error) { return resp.rows, resp.err } -func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { +func record(tags *tag.Map, ms interface{}, attachments map[string]string) { req := &recordReq{ tm: tags, ms: ms.([]stats.Measurement), @@ -147,9 +148,6 @@ func newWorker() *worker { } func (w *worker) start() { - prodMgr := metricproducer.GlobalManager() - prodMgr.AddProducer(w) - for { select { case cmd := <-w.c: @@ -166,9 +164,6 @@ func (w *worker) start() { } func (w *worker) stop() { - prodMgr := metricproducer.GlobalManager() - prodMgr.DeleteProducer(w) - w.quit <- true <-w.done } @@ -186,8 +181,6 @@ func (w *worker) getMeasureRef(name string) *measureRef { } func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { - w.mu.Lock() - defer w.mu.Unlock() vi, err := newViewInternal(v) if err != nil { return nil, err @@ -207,12 +200,6 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return vi, nil } -func (w *worker) unregisterView(viewName string) { - w.mu.Lock() - defer w.mu.Unlock() - delete(w.views, viewName) -} - func (w *worker) reportView(v *viewInternal, now time.Time) { if !v.isSubscribed() { return @@ -236,46 +223,7 @@ func (w *worker) reportView(v *viewInternal, now time.Time) { } func (w *worker) reportUsage(now time.Time) { - w.mu.Lock() - defer w.mu.Unlock() for _, v := range w.views { w.reportView(v, now) } } - -func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { - if !v.isSubscribed() { - return nil - } - - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } - - var startTime time.Time - if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || - v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { - startTime = time.Time{} - } else { - startTime = w.startTimes[v] - } - - return viewToMetric(v, now, startTime) -} - -// Read reads all view data and returns them as metrics. -// It is typically invoked by metric reader to export stats in metric format. 
-func (w *worker) Read() []*metricdata.Metric { - w.mu.Lock() - defer w.mu.Unlock() - now := time.Now() - metrics := make([]*metricdata.Metric, 0, len(w.views)) - for _, v := range w.views { - metric := w.toMetric(v, now) - if metric != nil { - metrics = append(metrics, metric) - } - } - return metrics -} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index 0267e179ae..b38f26f424 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -21,6 +21,8 @@ import ( "strings" "time" + "go.opencensus.io/exemplar" + "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" @@ -56,12 +58,6 @@ type registerViewReq struct { } func (cmd *registerViewReq) handleCommand(w *worker) { - for _, v := range cmd.views { - if err := v.canonicalize(); err != nil { - cmd.err <- err - return - } - } var errstr []string for _, view := range cmd.views { vi, err := w.tryRegisterView(view) @@ -103,7 +99,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { // The collected data can be cleared. vi.clearRows() } - w.unregisterView(name) + delete(w.views, name) } cmd.done <- struct{}{} } @@ -121,8 +117,6 @@ type retrieveDataResp struct { } func (cmd *retrieveDataReq) handleCommand(w *worker) { - w.mu.Lock() - defer w.mu.Unlock() vi, ok := w.views[cmd.v] if !ok { cmd.c <- &retrieveDataResp{ @@ -150,20 +144,23 @@ func (cmd *retrieveDataReq) handleCommand(w *worker) { type recordReq struct { tm *tag.Map ms []stats.Measurement - attachments map[string]interface{} + attachments map[string]string t time.Time } func (cmd *recordReq) handleCommand(w *worker) { - w.mu.Lock() - defer w.mu.Unlock() for _, m := range cmd.ms { if (m == stats.Measurement{}) { // not registered continue } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) + e := &exemplar.Exemplar{ + Value: m.Value(), + Timestamp: cmd.t, + Attachments: cmd.attachments, + } + v.addSample(cmd.tm, e) } } } diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go index b27d1b26b1..dcc13f4987 100644 --- a/vendor/go.opencensus.io/tag/context.go +++ b/vendor/go.opencensus.io/tag/context.go @@ -17,6 +17,8 @@ package tag import ( "context" + + "go.opencensus.io/exemplar" ) // FromContext returns the tag map stored in the context. @@ -41,3 +43,25 @@ func NewContext(ctx context.Context, m *Map) context.Context { type ctxKey struct{} var mapCtxKey = ctxKey{} + +func init() { + exemplar.RegisterAttachmentExtractor(extractTagsAttachments) +} + +func extractTagsAttachments(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { + m := FromContext(ctx) + if m == nil { + return a + } + if len(m.m) == 0 { + return a + } + if a == nil { + a = make(map[string]string) + } + + for k, v := range m.m { + a[exemplar.KeyPrefixTag+k.Name()] = v + } + return a +} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go index 0272ef85a4..5b72ba6ad3 100644 --- a/vendor/go.opencensus.io/tag/map.go +++ b/vendor/go.opencensus.io/tag/map.go @@ -28,15 +28,10 @@ type Tag struct { Value string } -type tagContent struct { - value string - m metadatas -} - // Map is a map of tags. Use New to create a context containing // a new Map. type Map struct { - m map[Key]tagContent + m map[Key]string } // Value returns the value for the key if a value for the key exists. 
@@ -45,7 +40,7 @@ func (m *Map) Value(k Key) (string, bool) { return "", false } v, ok := m.m[k] - return v.value, ok + return v, ok } func (m *Map) String() string { @@ -67,21 +62,21 @@ func (m *Map) String() string { return buffer.String() } -func (m *Map) insert(k Key, v string, md metadatas) { +func (m *Map) insert(k Key, v string) { if _, ok := m.m[k]; ok { return } - m.m[k] = tagContent{value: v, m: md} + m.m[k] = v } -func (m *Map) update(k Key, v string, md metadatas) { +func (m *Map) update(k Key, v string) { if _, ok := m.m[k]; ok { - m.m[k] = tagContent{value: v, m: md} + m.m[k] = v } } -func (m *Map) upsert(k Key, v string, md metadatas) { - m.m[k] = tagContent{value: v, m: md} +func (m *Map) upsert(k Key, v string) { + m.m[k] = v } func (m *Map) delete(k Key) { @@ -89,7 +84,7 @@ func (m *Map) delete(k Key) { } func newMap() *Map { - return &Map{m: make(map[Key]tagContent)} + return &Map{m: make(map[Key]string)} } // Mutator modifies a tag map. @@ -100,17 +95,13 @@ type Mutator interface { // Insert returns a mutator that inserts a // value associated with k. If k already exists in the tag map, // mutator doesn't update the value. -// Metadata applies metadata to the tag. It is optional. -// Metadatas are applied in the order in which it is provided. -// If more than one metadata updates the same attribute then -// the update from the last metadata prevails. -func Insert(k Key, v string, mds ...Metadata) Mutator { +func Insert(k Key, v string) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.insert(k, v, createMetadatas(mds...)) + m.insert(k, v) return m, nil }, } @@ -119,17 +110,13 @@ func Insert(k Key, v string, mds ...Metadata) Mutator { // Update returns a mutator that updates the // value of the tag associated with k with v. If k doesn't // exists in the tag map, the mutator doesn't insert the value. -// Metadata applies metadata to the tag. It is optional. -// Metadatas are applied in the order in which it is provided. -// If more than one metadata updates the same attribute then -// the update from the last metadata prevails. -func Update(k Key, v string, mds ...Metadata) Mutator { +func Update(k Key, v string) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.update(k, v, createMetadatas(mds...)) + m.update(k, v) return m, nil }, } @@ -139,37 +126,18 @@ func Update(k Key, v string, mds ...Metadata) Mutator { // value of the tag associated with k with v. It inserts the // value if k doesn't exist already. It mutates the value // if k already exists. -// Metadata applies metadata to the tag. It is optional. -// Metadatas are applied in the order in which it is provided. -// If more than one metadata updates the same attribute then -// the update from the last metadata prevails. -func Upsert(k Key, v string, mds ...Metadata) Mutator { +func Upsert(k Key, v string) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.upsert(k, v, createMetadatas(mds...)) + m.upsert(k, v) return m, nil }, } } -func createMetadatas(mds ...Metadata) metadatas { - var metas metadatas - if len(mds) > 0 { - for _, md := range mds { - if md != nil { - md(&metas) - } - } - } else { - WithTTL(TTLUnlimitedPropagation)(&metas) - } - return metas - -} - // Delete returns a mutator that deletes // the value associated with k. 
func Delete(k Key) Mutator { @@ -192,10 +160,10 @@ func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { if !checkKeyName(k.Name()) { return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) } - if !checkValue(v.value) { + if !checkValue(v) { return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) } - m.insert(k, v.value, v.m) + m.insert(k, v) } } var err error diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go index f8b5827615..3e998950c3 100644 --- a/vendor/go.opencensus.io/tag/map_codec.go +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -162,19 +162,14 @@ func (eg *encoderGRPC) bytes() []byte { // Encode encodes the tag map into a []byte. It is useful to propagate // the tag maps on wire in binary format. func Encode(m *Map) []byte { - if m == nil { - return nil - } eg := &encoderGRPC{ buf: make([]byte, len(m.m)), } eg.writeByte(byte(tagsVersionID)) for k, v := range m.m { - if v.m.ttl.ttl == valueTTLUnlimitedPropagation { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k.name) - eg.writeBytesWithVarintLen([]byte(v.value)) - } + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v)) } return eg.bytes() } @@ -192,7 +187,7 @@ func Decode(bytes []byte) (*Map, error) { // DecodeEach decodes the given serialized tag map, calling handler for each // tag key and value decoded. -func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { +func DecodeEach(bytes []byte, fn func(key Key, val string)) error { eg := &encoderGRPC{ buf: bytes, } @@ -230,7 +225,7 @@ func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error if !checkValue(val) { return errInvalidValue } - fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) + fn(key, val) if err != nil { return err } diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go deleted file mode 100644 index 6571a583ea..0000000000 --- a/vendor/go.opencensus.io/tag/metadata.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -const ( - // valueTTLNoPropagation prevents tag from propagating. - valueTTLNoPropagation = 0 - - // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. - valueTTLUnlimitedPropagation = -1 -) - -// TTL is metadata that specifies number of hops a tag can propagate. -// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata -type TTL struct { - ttl int -} - -var ( - // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. - TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} - - // TTLNoPropagation is TTL metadata that prevents tag from propagating. 
- TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} -) - -type metadatas struct { - ttl TTL -} - -// Metadata applies metadatas specified by the function. -type Metadata func(*metadatas) - -// WithTTL applies metadata with provided ttl. -func WithTTL(ttl TTL) Metadata { - return func(m *metadatas) { - m.ttl = ttl - } -} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go index b34d95e34a..f81cd0b4a7 100644 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -25,7 +25,7 @@ func do(ctx context.Context, f func(ctx context.Context)) { m := FromContext(ctx) keyvals := make([]string, 0, 2*len(m.m)) for k, v := range m.m { - keyvals = append(keyvals, k.Name(), v.value) + keyvals = append(keyvals, k.Name(), v) } pprof.Do(ctx, pprof.Labels(keyvals...), f) } diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go index 0c54492a2b..01f0f90831 100644 --- a/vendor/go.opencensus.io/trace/basetypes.go +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -59,11 +59,6 @@ func Int64Attribute(key string, value int64) Attribute { return Attribute{key: key, value: value} } -// Float64Attribute returns a float64-valued attribute. -func Float64Attribute(key string, value float64) Attribute { - return Attribute{key: key, value: value} -} - // StringAttribute returns a string-valued attribute. func StringAttribute(key string, value string) Attribute { return Attribute{key: key, value: value} @@ -76,8 +71,8 @@ type LinkType int32 // LinkType values. const ( LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. - LinkTypeChild // The linked span is a child of the current span. - LinkTypeParent // The linked span is the parent of the current span. + LinkTypeChild // The current span is a child of the linked span. + LinkTypeParent // The current span is the parent of the linked span. ) // Link represents a reference from one span to another span. diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go index 775f8274fa..0816892ea1 100644 --- a/vendor/go.opencensus.io/trace/config.go +++ b/vendor/go.opencensus.io/trace/config.go @@ -27,36 +27,10 @@ type Config struct { // IDGenerator is for internal use only. IDGenerator internal.IDGenerator - - // MaxAnnotationEventsPerSpan is max number of annotation events per span - MaxAnnotationEventsPerSpan int - - // MaxMessageEventsPerSpan is max number of message events per span - MaxMessageEventsPerSpan int - - // MaxAnnotationEventsPerSpan is max number of attributes per span - MaxAttributesPerSpan int - - // MaxLinksPerSpan is max number of links per span - MaxLinksPerSpan int } var configWriteMu sync.Mutex -const ( - // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span - DefaultMaxAnnotationEventsPerSpan = 32 - - // DefaultMaxMessageEventsPerSpan is default max number of message events per span - DefaultMaxMessageEventsPerSpan = 128 - - // DefaultMaxAttributesPerSpan is default max number of attributes per span - DefaultMaxAttributesPerSpan = 32 - - // DefaultMaxLinksPerSpan is default max number of links per span - DefaultMaxLinksPerSpan = 32 -) - // ApplyConfig applies changes to the global tracing configuration. // // Fields not provided in the given config are going to be preserved. 
@@ -70,17 +44,5 @@ func ApplyConfig(cfg Config) { if cfg.IDGenerator != nil { c.IDGenerator = cfg.IDGenerator } - if cfg.MaxAnnotationEventsPerSpan > 0 { - c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan - } - if cfg.MaxMessageEventsPerSpan > 0 { - c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan - } - if cfg.MaxAttributesPerSpan > 0 { - c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan - } - if cfg.MaxLinksPerSpan > 0 { - c.MaxLinksPerSpan = cfg.MaxLinksPerSpan - } config.Store(&c) } diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go deleted file mode 100644 index ffc264f23d..0000000000 --- a/vendor/go.opencensus.io/trace/evictedqueue.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -type evictedQueue struct { - queue []interface{} - capacity int - droppedCount int -} - -func newEvictedQueue(capacity int) *evictedQueue { - eq := &evictedQueue{ - capacity: capacity, - queue: make([]interface{}, 0), - } - - return eq -} - -func (eq *evictedQueue) add(value interface{}) { - if len(eq.queue) == eq.capacity { - eq.queue = eq.queue[1:] - eq.droppedCount++ - } - eq.queue = append(eq.queue, value) -} diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/trace/exemplar.go similarity index 52% rename from vendor/go.opencensus.io/metric/metricdata/unit.go rename to vendor/go.opencensus.io/trace/exemplar.go index b483a1371b..416d80590d 100644 --- a/vendor/go.opencensus.io/metric/metricdata/unit.go +++ b/vendor/go.opencensus.io/trace/exemplar.go @@ -12,16 +12,32 @@ // See the License for the specific language governing permissions and // limitations under the License. -package metricdata +package trace -// Unit is a string encoded according to the case-sensitive abbreviations from the -// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html -type Unit string +import ( + "context" + "encoding/hex" -// Predefined units. To record against a unit not represented here, create your -// own Unit type constant from a string. 
-const ( - UnitDimensionless Unit = "1" - UnitBytes Unit = "By" - UnitMilliseconds Unit = "ms" + "go.opencensus.io/exemplar" ) + +func init() { + exemplar.RegisterAttachmentExtractor(attachSpanContext) +} + +func attachSpanContext(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { + span := FromContext(ctx) + if span == nil { + return a + } + sc := span.SpanContext() + if !sc.IsSampled() { + return a + } + if a == nil { + a = make(exemplar.Attachments) + } + a[exemplar.KeyTraceID] = hex.EncodeToString(sc.TraceID[:]) + a[exemplar.KeySpanID] = hex.EncodeToString(sc.SpanID[:]) + return a +} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go index e0d9a4b99e..77a8c73575 100644 --- a/vendor/go.opencensus.io/trace/export.go +++ b/vendor/go.opencensus.io/trace/export.go @@ -85,13 +85,6 @@ type SpanData struct { Annotations []Annotation MessageEvents []MessageEvent Status - Links []Link - HasRemoteParent bool - DroppedAttributeCount int - DroppedAnnotationCount int - DroppedMessageEventCount int - DroppedLinkCount int - - // ChildSpanCount holds the number of child span created for this span. - ChildSpanCount int + Links []Link + HasRemoteParent bool } diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go index 7e808d8f30..1c8b9b34b2 100644 --- a/vendor/go.opencensus.io/trace/internal/internal.go +++ b/vendor/go.opencensus.io/trace/internal/internal.go @@ -15,7 +15,6 @@ // Package internal provides trace internals. package internal -// IDGenerator allows custom generators for TraceId and SpanId. type IDGenerator interface { NewTraceID() [16]byte NewSpanID() [8]byte diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go deleted file mode 100644 index 3f80a33681..0000000000 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "github.com/hashicorp/golang-lru/simplelru" -) - -type lruMap struct { - simpleLruMap *simplelru.LRU - droppedCount int -} - -func newLruMap(size int) *lruMap { - lm := &lruMap{} - lm.simpleLruMap, _ = simplelru.NewLRU(size, nil) - return lm -} - -func (lm *lruMap) add(key, value interface{}) { - evicted := lm.simpleLruMap.Add(key, value) - if evicted { - lm.droppedCount++ - } -} diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index 38ead7bf0a..9e5e5f0331 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -42,20 +42,6 @@ type Span struct { data *SpanData mu sync.Mutex // protects the contents of *data (but not the pointer value.) spanContext SpanContext - - // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry - // is removed to create room for a new entry. 
- lruAttributes *lruMap - - // annotations are stored in FIFO queue capped by configured limit. - annotations *evictedQueue - - // messageEvents are stored in FIFO queue capped by configured limit. - messageEvents *evictedQueue - - // links are stored in FIFO queue capped by configured limit. - links *evictedQueue - // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. *spanStore endOnce sync.Once @@ -170,7 +156,6 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont var opts StartOptions var parent SpanContext if p := FromContext(ctx); p != nil { - p.addChild() parent = p.spanContext } for _, op := range o { @@ -241,11 +226,6 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa Name: name, HasRemoteParent: remoteParent, } - span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) - span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) - span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) - span.links = newEvictedQueue(cfg.MaxLinksPerSpan) - if hasParent { span.data.ParentSpanID = parent.SpanID } @@ -296,21 +276,11 @@ func (s *Span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data - if s.lruAttributes.simpleLruMap.Len() > 0 { - sd.Attributes = s.lruAttributesToAttributeMap() - sd.DroppedAttributeCount = s.lruAttributes.droppedCount - } - if len(s.annotations.queue) > 0 { - sd.Annotations = s.interfaceArrayToAnnotationArray() - sd.DroppedAnnotationCount = s.annotations.droppedCount - } - if len(s.messageEvents.queue) > 0 { - sd.MessageEvents = s.interfaceArrayToMessageEventArray() - sd.DroppedMessageEventCount = s.messageEvents.droppedCount - } - if len(s.links.queue) > 0 { - sd.Links = s.interfaceArrayToLinksArray() - sd.DroppedLinkCount = s.links.droppedCount + if s.data.Attributes != nil { + sd.Attributes = make(map[string]interface{}) + for k, v := range s.data.Attributes { + sd.Attributes[k] = v + } } s.mu.Unlock() return &sd @@ -344,57 +314,6 @@ func (s *Span) SetStatus(status Status) { s.mu.Unlock() } -func (s *Span) interfaceArrayToLinksArray() []Link { - linksArr := make([]Link, 0) - for _, value := range s.links.queue { - linksArr = append(linksArr, value.(Link)) - } - return linksArr -} - -func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { - messageEventArr := make([]MessageEvent, 0) - for _, value := range s.messageEvents.queue { - messageEventArr = append(messageEventArr, value.(MessageEvent)) - } - return messageEventArr -} - -func (s *Span) interfaceArrayToAnnotationArray() []Annotation { - annotationArr := make([]Annotation, 0) - for _, value := range s.annotations.queue { - annotationArr = append(annotationArr, value.(Annotation)) - } - return annotationArr -} - -func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { - attributes := make(map[string]interface{}) - for _, key := range s.lruAttributes.simpleLruMap.Keys() { - value, ok := s.lruAttributes.simpleLruMap.Get(key) - if ok { - keyStr := key.(string) - attributes[keyStr] = value - } - } - return attributes -} - -func (s *Span) copyToCappedAttributes(attributes []Attribute) { - for _, a := range attributes { - s.lruAttributes.add(a.key, a.value) - } -} - -func (s *Span) addChild() { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.ChildSpanCount++ - s.mu.Unlock() -} - // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. 
@@ -403,7 +322,10 @@ func (s *Span) AddAttributes(attributes ...Attribute) { return } s.mu.Lock() - s.copyToCappedAttributes(attributes) + if s.data.Attributes == nil { + s.data.Attributes = make(map[string]interface{}) + } + copyAttributes(s.data.Attributes, attributes) s.mu.Unlock() } @@ -423,7 +345,7 @@ func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...in m = make(map[string]interface{}) copyAttributes(m, attributes) } - s.annotations.add(Annotation{ + s.data.Annotations = append(s.data.Annotations, Annotation{ Time: now, Message: msg, Attributes: m, @@ -439,7 +361,7 @@ func (s *Span) printStringInternal(attributes []Attribute, str string) { a = make(map[string]interface{}) copyAttributes(a, attributes) } - s.annotations.add(Annotation{ + s.data.Annotations = append(s.data.Annotations, Annotation{ Time: now, Message: str, Attributes: a, @@ -476,7 +398,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy } now := time.Now() s.mu.Lock() - s.messageEvents.add(MessageEvent{ + s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ Time: now, EventType: MessageEventTypeSent, MessageID: messageID, @@ -498,7 +420,7 @@ func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compresse } now := time.Now() s.mu.Lock() - s.messageEvents.add(MessageEvent{ + s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ Time: now, EventType: MessageEventTypeRecv, MessageID: messageID, @@ -514,7 +436,7 @@ func (s *Span) AddLink(l Link) { return } s.mu.Lock() - s.links.add(l) + s.data.Links = append(s.data.Links, l) s.mu.Unlock() } @@ -546,12 +468,8 @@ func init() { gen.spanIDInc |= 1 config.Store(&Config{ - DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: gen, - MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, - MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, - MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, - MaxLinksPerSpan: DefaultMaxLinksPerSpan, + DefaultSampler: ProbabilitySampler(defaultSamplingProbability), + IDGenerator: gen, }) } diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml index 0f3769e5fa..58957222a3 100644 --- a/vendor/go.uber.org/atomic/.travis.yml +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -3,13 +3,9 @@ language: go go_import_path: go.uber.org/atomic go: - - 1.11.x - - 1.12.x - -matrix: - include: - - go: 1.12.x - env: NO_TEST=yes LINT=yes + - 1.7 + - 1.8 + - 1.9 cache: directories: @@ -19,9 +15,9 @@ install: - make install_ci script: - - test -n "$NO_TEST" || make test_ci - - test -n "$NO_TEST" || scripts/test-ubergo.sh - - test -z "$LINT" || make install_lint lint + - make test_ci + - scripts/test-ubergo.sh + - make lint after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile index 1ef263075d..dfc63d9db4 100644 --- a/vendor/go.uber.org/atomic/Makefile +++ b/vendor/go.uber.org/atomic/Makefile @@ -1,13 +1,24 @@ +PACKAGES := $(shell glide nv) # Many Go tools take file globs or directories as arguments instead of packages. PACKAGE_FILES ?= *.go -# For pre go1.6 + +# The linting tools evolve with each Go version, so run them only on the latest +# stable release. 
+GO_VERSION := $(shell go version | cut -d " " -f 3) +GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) +LINTABLE_MINOR_VERSIONS := 7 8 +ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) +SHOULD_LINT := true +endif + + export GO15VENDOREXPERIMENT=1 .PHONY: build build: - go build -i ./... + go build -i $(PACKAGES) .PHONY: install @@ -18,7 +29,7 @@ install: .PHONY: test test: - go test -cover -race ./... + go test -cover -race $(PACKAGES) .PHONY: install_ci @@ -26,24 +37,26 @@ install_ci: install go get github.com/wadey/gocovmerge go get github.com/mattn/goveralls go get golang.org/x/tools/cmd/cover - -.PHONY: install_lint -install_lint: - go get golang.org/x/lint/golint - +ifdef SHOULD_LINT + go get github.com/golang/lint/golint +endif .PHONY: lint lint: +ifdef SHOULD_LINT @rm -rf lint.log @echo "Checking formatting..." @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log @echo "Checking vet..." - @go vet ./... 2>&1 | tee -a lint.log;) + @$(foreach dir,$(PACKAGE_FILES),go tool vet $(dir) 2>&1 | tee -a lint.log;) @echo "Checking lint..." - @golint $$(go list ./...) 2>&1 | tee -a lint.log + @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) @echo "Checking for unresolved FIXMEs..." @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log @[ ! -s lint.log ] +else + @echo "Skipping linters on" $(GO_VERSION) +endif .PHONY: test_ci diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md index 62eb8e5760..6505abf65c 100644 --- a/vendor/go.uber.org/atomic/README.md +++ b/vendor/go.uber.org/atomic/README.md @@ -23,13 +23,13 @@ See the [documentation][doc] for a complete API specification. ## Development Status Stable. -___ +
Released under the [MIT License](LICENSE.txt). [doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg [doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master -[ci]: https://travis-ci.com/uber-go/atomic +[ci-img]: https://travis-ci.org/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.org/uber-go/atomic [cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/atomic [reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go deleted file mode 100644 index 0489d19bad..0000000000 --- a/vendor/go.uber.org/atomic/error.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// Error is an atomic type-safe wrapper around Value for errors -type Error struct{ v Value } - -// errorHolder is non-nil holder for error object. -// atomic.Value panics on saving nil object, so err object needs to be -// wrapped with valid object first. -type errorHolder struct{ err error } - -// NewError creates new atomic error object -func NewError(err error) *Error { - e := &Error{} - if err != nil { - e.Store(err) - } - return e -} - -// Load atomically loads the wrapped error -func (e *Error) Load() error { - v := e.v.Load() - if v == nil { - return nil - } - - eh := v.(errorHolder) - return eh.err -} - -// Store atomically stores error. -// NOTE: a holder object is allocated on each Store call. -func (e *Error) Store(err error) { - e.v.Store(errorHolder{err: err}) -} diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml index ada5ebdcc9..a3321fa2dc 100644 --- a/vendor/go.uber.org/zap/.travis.yml +++ b/vendor/go.uber.org/zap/.travis.yml @@ -1,8 +1,8 @@ language: go sudo: false go: - - 1.11.x - - 1.12.x + - 1.9.x + - 1.10.x go_import_path: go.uber.org/zap env: global: diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 28d10677eb..17d5b49f33 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,22 +1,5 @@ # Changelog -## 1.10.0 (29 Apr 2019) - -Bugfixes: -* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a - string. -* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. 
- -Enhancements: -* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test - loggers. -* [#675][]: Don't panic when encoding a String field. -* [#704][]: Disable HTML escaping for JSON objects encoded using the - reflect-based encoder. - -Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions -to this release. - ## v1.9.1 (06 Aug 2018) Bugfixes: @@ -320,8 +303,3 @@ upgrade to the upcoming stable release. [#572]: https://github.com/uber-go/zap/pull/572 [#606]: https://github.com/uber-go/zap/pull/606 [#614]: https://github.com/uber-go/zap/pull/614 -[#657]: https://github.com/uber-go/zap/pull/657 -[#706]: https://github.com/uber-go/zap/pull/706 -[#610]: https://github.com/uber-go/zap/pull/610 -[#675]: https://github.com/uber-go/zap/pull/675 -[#704]: https://github.com/uber-go/zap/pull/704 diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index 073e9aa910..ef7893b3b0 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -9,7 +9,7 @@ PKG_FILES ?= *.go zapcore benchmarks buffer zapgrpc zaptest zaptest/observer int # stable release. GO_VERSION := $(shell go version | cut -d " " -f 3) GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) -LINTABLE_MINOR_VERSIONS := 12 +LINTABLE_MINOR_VERSIONS := 10 ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) SHOULD_LINT := true endif @@ -45,7 +45,7 @@ ifdef SHOULD_LINT @echo "Installing test dependencies for vet..." @go test -i $(PKGS) @echo "Checking vet..." - @go vet $(VET_RULES) $(PKGS) 2>&1 | tee -a lint.log + @$(foreach dir,$(PKG_FILES),go tool vet $(VET_RULES) $(dir) 2>&1 | tee -a lint.log;) @echo "Checking lint..." @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) @echo "Checking for unresolved FIXMEs..." diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go index c1ac0507cd..d02232e39f 100644 --- a/vendor/go.uber.org/zap/global.go +++ b/vendor/go.uber.org/zap/global.go @@ -31,6 +31,7 @@ import ( ) const ( + _stdLogDefaultDepth = 2 _loggerWriterDepth = 2 _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + "https://github.com/uber-go/zap/issues/new and reference this error: %v" diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go deleted file mode 100644 index 6b5dbda807..0000000000 --- a/vendor/go.uber.org/zap/global_go112.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// See #682 for more information. -// +build go1.12 - -package zap - -const _stdLogDefaultDepth = 1 diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/global_prego112.go deleted file mode 100644 index d3ab9af933..0000000000 --- a/vendor/go.uber.org/zap/global_prego112.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// See #682 for more information. -// +build !go1.12 - -package zap - -const _stdLogDefaultDepth = 2 diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index ae772e4a17..6a5e33e2f7 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -160,7 +160,7 @@ func (f Field) AddTo(enc ObjectEncoder) { case NamespaceType: enc.OpenNamespace(f.Key) case StringerType: - err = encodeStringer(f.Key, f.Interface, enc) + enc.AddString(f.Key, f.Interface.(fmt.Stringer).String()) case ErrorType: encodeError(f.Key, f.Interface.(error), enc) case SkipType: @@ -199,14 +199,3 @@ func addFields(enc ObjectEncoder, fields []Field) { fields[i].AddTo(enc) } } - -func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (err error) { - defer func() { - if v := recover(); v != nil { - err = fmt.Errorf("PANIC=%v", v) - } - }() - - enc.AddString(key, stringer.(fmt.Stringer).String()) - return -} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 9aec4eada3..2dc67d81e7 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -137,9 +137,6 @@ func (enc *jsonEncoder) resetReflectBuf() { if enc.reflectBuf == nil { enc.reflectBuf = bufferpool.Get() enc.reflectEnc = json.NewEncoder(enc.reflectBuf) - - // For consistency with our custom JSON encoder. 
- enc.reflectEnc.SetEscapeHTML(false) } else { enc.reflectBuf.Reset() } diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go index dfead0829d..6ef85b09c7 100644 --- a/vendor/go.uber.org/zap/zapcore/memory_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -158,7 +158,7 @@ func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { } func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, v) } func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 0f443e6934..68f436ed95 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -19,6 +19,57 @@ See godoc for further documentation and examples. * [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) * [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) + +## App Engine + +In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor +of the [`context.Context`](https://golang.org/x/net/context#Context) type from +the `golang.org/x/net/context` package. Later replaced by the standard `context` package +of the [`context.Context`](https://golang.org/pkg/context#Context) type. + + +This means it's no longer possible to use the "Classic App Engine" +`appengine.Context` type with the `oauth2` package. (You're using +Classic App Engine if you import the package `"appengine"`.) + +To work around this, you may use the new `"google.golang.org/appengine"` +package. This package has almost the same API as the `"appengine"` package, +but it can be fetched with `go get` and used on "Managed VMs" and well as +Classic App Engine. + +See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) +for information on updating your app. + +If you don't want to update your entire app to use the new App Engine packages, +you may use both sets of packages in parallel, using only the new packages +with the `oauth2` package. + +```go +import ( + "context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + newappengine "google.golang.org/appengine" + newurlfetch "google.golang.org/appengine/urlfetch" + + "appengine" +) + +func handler(w http.ResponseWriter, r *http.Request) { + var c appengine.Context = appengine.NewContext(r) + c.Infof("Logging a message with the old package") + + var ctx context.Context = newappengine.NewContext(r) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "scope"), + Base: &newurlfetch.Transport{Context: ctx}, + }, + } + client.Get("...") +} +``` + ## Policy for new packages We no longer accept new provider-specific packages in this repo. 
For diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index ad2c09236c..5087d845fe 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -73,6 +73,7 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc // 4. On Google Compute Engine, Google App Engine standard second generation runtimes // (>= Go 1.11), and Google App Engine flexible environment, it fetches // credentials from the metadata server. +// (In this final case any provided scopes are ignored.) func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { // First, try the environment variable. const envVar = "GOOGLE_APPLICATION_CREDENTIALS" @@ -108,7 +109,7 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials id, _ := metadata.ProjectID() return &DefaultCredentials{ ProjectID: id, - TokenSource: ComputeTokenSource("", scopes...), + TokenSource: ComputeTokenSource(""), }, nil } diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 81de32b360..ca7d208d77 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -9,7 +9,6 @@ import ( "encoding/json" "errors" "fmt" - "net/url" "strings" "time" @@ -20,13 +19,12 @@ import ( // Endpoint is Google's OAuth 2.0 endpoint. var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://oauth2.googleapis.com/token", - AuthStyle: oauth2.AuthStyleInParams, + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", } // JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. -const JWTTokenURL = "https://oauth2.googleapis.com/token" +const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" // ConfigFromJSON uses a Google Developers Console client_credentials.json // file to construct a config. @@ -152,16 +150,14 @@ func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oau // from Google Compute Engine (GCE)'s metadata server. It's only valid to use // this token source if your program is running on a GCE instance. // If no account is specified, "default" is used. -// If no scopes are specified, a set of default scopes are automatically granted. // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. -func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) +func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) } type computeSource struct { account string - scopes []string } func (cs computeSource) Token() (*oauth2.Token, error) { @@ -172,13 +168,7 @@ func (cs computeSource) Token() (*oauth2.Token, error) { if acct == "" { acct = "default" } - tokenURI := "instance/service-accounts/" + acct + "/token" - if len(cs.scopes) > 0 { - v := url.Values{} - v.Set("scopes", strings.Join(cs.scopes, ",")) - tokenURI = tokenURI + "?" 
+ v.Encode() - } - tokenJSON, err := metadata.Get(tokenURI) + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") if err != nil { return nil, err } @@ -194,16 +184,9 @@ func (cs computeSource) Token() (*oauth2.Token, error) { if res.ExpiresInSec == 0 || res.AccessToken == "" { return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") } - tok := &oauth2.Token{ + return &oauth2.Token{ AccessToken: res.AccessToken, TokenType: res.TokenType, Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - } - // NOTE(cbro): add hidden metadata about where the token is from. - // This is needed for detection by client libraries to know that credentials come from the metadata server. - // This may be removed in a future version of this library. - return tok.WithExtra(map[string]interface{}{ - "oauth2.google.tokenSource": "compute-metadata", - "oauth2.google.serviceAccount": acct, - }), nil + }, nil } diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 355c386961..a831b77465 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -11,13 +11,11 @@ import ( "fmt" "io" "io/ioutil" - "math" "mime" "net/http" "net/url" "strconv" "strings" - "sync" "time" "golang.org/x/net/context/ctxhttp" @@ -63,21 +61,22 @@ type tokenJSON struct { TokenType string `json:"token_type"` RefreshToken string `json:"refresh_token"` ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in } func (e *tokenJSON) expiry() (t time.Time) { if v := e.ExpiresIn; v != 0 { return time.Now().Add(time.Duration(v) * time.Second) } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } return } type expirationTime int32 func (e *expirationTime) UnmarshalJSON(b []byte) error { - if len(b) == 0 || string(b) == "null" { - return nil - } var n json.Number err := json.Unmarshal(b, &n) if err != nil { @@ -87,78 +86,106 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { if err != nil { return err } - if i > math.MaxInt32 { - i = math.MaxInt32 - } *e = expirationTime(i) return nil } -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - -// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. -type AuthStyle int - -const ( - AuthStyleUnknown AuthStyle = 0 - AuthStyleInParams AuthStyle = 1 - AuthStyleInHeader AuthStyle = 2 -) - -// authStyleCache is the set of tokenURLs we've successfully used via -// RetrieveToken and which style auth we ended up using. -// It's called a cache, but it doesn't (yet?) shrink. It's expected that -// the set of OAuth2 servers a program contacts over time is fixed and -// small. 
-var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://api.codeswholesale.com/oauth/token", + "https://api.dropbox.com/", + "https://api.dropboxapi.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", + "https://api.soundcloud.com/", + "https://api.twitch.tv/", + "https://id.twitch.tv/", + "https://app.box.com/", + "https://api.box.com/", + "https://connect.stripe.com/", + "https://login.mailchimp.com/", + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", + "https://login.windows.net", + "https://login.live.com/", + "https://login.live-int.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + "https://slack.com/", + "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", + "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", + "https://sandbox.codeswholesale.com/oauth/token", + "https://api.sipgate.com/v1/authorization/oauth", + "https://api.medium.com/v1/tokens", + "https://log.finalsurge.com/oauth/token", + "https://multisport.todaysplan.com.au/rest/oauth/access_token", + "https://whats.todaysplan.com.au/rest/oauth/access_token", + "https://stackoverflow.com/oauth/access_token", + "https://account.health.nokia.com", + "https://accounts.zoho.com", + "https://gitter.im/login/oauth/token", + "https://openid-connect.onelogin.com/oidc", + "https://api.dailymotion.com/oauth/token", } -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. -func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil +// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. +var brokenAuthHeaderDomains = []string{ + ".auth0.com", + ".force.com", + ".myshopify.com", + ".okta.com", + ".oktapreview.com", } -// lookupAuthStyle reports which auth style we last used with tokenURL -// when calling RetrieveToken and whether we have ever done so. -func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] - return +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) } -// setAuthStyle adds an entry to authStyleCache, documented above. -func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts either it in URL param or Auth header, but not both. 
+// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } } - authStyleCache.m[tokenURL] = v + + if u, err := url.Parse(tokenURL); err == nil { + for _, s := range brokenAuthHeaderDomains { + if strings.HasSuffix(u.Host, s) { + return false + } + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. + return true } -// newTokenRequest returns a new *http.Request to retrieve a new token -// from tokenURL using the provided clientID, clientSecret, and POST -// body parameters. -// -// inParams is whether the clientID & clientSecret should be encoded -// as the POST body. An 'inParams' value of true means to send it in -// the POST body (along with any values in v); false means to send it -// in the Authorization header. -func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) { - if authStyle == AuthStyleInParams { - v = cloneURLValues(v) +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { + bustedAuth := !providerAuthHeaderWorks(tokenURL) + if bustedAuth { if clientID != "" { v.Set("client_id", clientID) } @@ -171,70 +198,15 @@ func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, auth return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - if authStyle == AuthStyleInHeader { + if !bustedAuth { req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) } - return req, nil -} - -func cloneURLValues(v url.Values) url.Values { - v2 := make(url.Values, len(v)) - for k, vv := range v { - v2[k] = append([]string(nil), vv...) - } - return v2 -} - -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { - needsAuthStyleProbe := authStyle == 0 - if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { - authStyle = style - needsAuthStyleProbe = false - } else { - authStyle = AuthStyleInHeader // the first way we'll try - } - } - req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) - if err != nil { - return nil, err - } - token, err := doTokenRoundTrip(ctx, req) - if err != nil && needsAuthStyleProbe { - // If we get an error, assume the server wants the - // clientID & clientSecret in a different form. - // See https://code.google.com/p/goauth2/issues/detail?id=31 for background. - // In summary: - // - Reddit only accepts client secret in the Authorization header - // - Dropbox accepts either it in URL param or Auth header, but not both. - // - Google only accepts URL param (not spec compliant?), not Auth header - // - Stripe only accepts client secret in Auth header with Bearer method, not Basic - // - // We used to maintain a big table in this code of all the sites and which way - // they went, but maintaining it didn't scale & got annoying. - // So just try both ways. 
- authStyle = AuthStyleInParams // the second way we'll try - req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) - token, err = doTokenRoundTrip(ctx, req) - } - if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) - } - // Don't overwrite `RefreshToken` with an empty value - // if this was a token refreshing request. - if token != nil && token.RefreshToken == "" { - token.RefreshToken = v.Get("refresh_token") - } - return token, err -} - -func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) if err != nil { return nil, err } + defer r.Body.Close() body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) - r.Body.Close() if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } @@ -260,6 +232,12 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { Raw: vals, } e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } expires, _ := strconv.Atoi(e) if expires != 0 { token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) @@ -278,8 +256,13 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } if token.AccessToken == "" { - return nil, errors.New("oauth2: server response missing access_token") + return token, errors.New("oauth2: server response missing access_token") } return token, nil } diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go index b2bf18298b..0783a94c48 100644 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -61,19 +61,6 @@ type Config struct { // Expires optionally specifies how long the token is valid for. Expires time.Duration - - // Audience optionally specifies the intended audience of the - // request. If empty, the value of TokenURL is used as the - // intended audience. - Audience string - - // PrivateClaims optionally specifies custom private claims in the JWT. - // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 - PrivateClaims map[string]interface{} - - // UseIDToken optionally specifies whether ID token should be used instead - // of access token when the server returns both. 
- UseIDToken bool } // TokenSource returns a JWT TokenSource using the configuration @@ -105,10 +92,9 @@ func (js jwtSource) Token() (*oauth2.Token, error) { } hc := oauth2.NewClient(js.ctx, nil) claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, - PrivateClaims: js.conf.PrivateClaims, + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, } if subject := js.conf.Subject; subject != "" { claimSet.Sub = subject @@ -119,9 +105,6 @@ func (js jwtSource) Token() (*oauth2.Token, error) { if t := js.conf.Expires; t > 0 { claimSet.Exp = time.Now().Add(t).Unix() } - if aud := js.conf.Audience; aud != "" { - claimSet.Aud = aud - } h := *defaultHeader h.KeyID = js.conf.PrivateKeyID payload, err := jws.Encode(&h, claimSet, pk) @@ -175,11 +158,5 @@ func (js jwtSource) Token() (*oauth2.Token, error) { } token.Expiry = time.Unix(claimSet.Exp, 0) } - if js.conf.UseIDToken { - if tokenRes.IDToken == "" { - return nil, fmt.Errorf("oauth2: response doesn't have JWT token") - } - token.AccessToken = tokenRes.IDToken - } return token, nil } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 291df5c833..3de63315b8 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -26,13 +26,17 @@ import ( // Deprecated: Use context.Background() or context.TODO() instead. var NoContext = context.TODO() -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as parameters in the request body rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. @@ -67,38 +71,13 @@ type TokenSource interface { Token() (*Token, error) } -// Endpoint represents an OAuth 2.0 provider's authorization and token +// Endpoint contains the OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { AuthURL string TokenURL string - - // AuthStyle optionally specifies how the endpoint wants the - // client ID & client secret sent. The zero value means to - // auto-detect. - AuthStyle AuthStyle } -// AuthStyle represents how requests for tokens are authenticated -// to the server. -type AuthStyle int - -const ( - // AuthStyleAutoDetect means to auto-detect which authentication - // style the provider wants by trying both ways and caching - // the successful way for the future. - AuthStyleAutoDetect AuthStyle = 0 - - // AuthStyleInParams sends the "client_id" and "client_secret" - // in the POST body as application/x-www-form-urlencoded parameters. 
- AuthStyleInParams AuthStyle = 1 - - // AuthStyleInHeader sends the client_id and client_password - // using HTTP Basic Authorization. This is an optional style - // described in the OAuth2 RFC 6749 section 2.3.1. - AuthStyleInHeader AuthStyle = 2 -) - var ( // AccessTypeOnline and AccessTypeOffline are options passed // to the Options.AuthCodeURL method. They modify the @@ -117,7 +96,7 @@ var ( // ApprovalForce forces the users to view the consent dialog // and confirm the permissions request at the URL returned // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent") + ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") ) // An AuthCodeOption is passed to Config.AuthCodeURL. @@ -145,7 +124,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challenge. +// It can also be used to pass the PKCE challange. // See https://www.oauth.com/oauth2-servers/pkce/ for more info. func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 822720341a..ee4be545fe 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -154,7 +154,7 @@ func tokenFromInternal(t *internal.Token) *Token { // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // with an error.. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*RetrieveError)(rErr) diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go index 7f096fef07..ac53e733e7 100644 --- a/vendor/golang.org/x/sync/semaphore/semaphore.go +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -95,7 +95,7 @@ func (s *Weighted) Release(n int64) { s.cur -= n if s.cur < 0 { s.mu.Unlock() - panic("semaphore: released more than held") + panic("semaphore: bad release") } for { next := s.waiters.Front() diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go new file mode 100644 index 0000000000..0f652ea6fb --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go @@ -0,0 +1,220 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgo + +// This file handles cgo preprocessing of files containing `import "C"`. +// +// DESIGN +// +// The approach taken is to run the cgo processor on the package's +// CgoFiles and parse the output, faking the filenames of the +// resulting ASTs so that the synthetic file containing the C types is +// called "C" (e.g. "~/go/src/net/C") and the preprocessed files +// have their original names (e.g. "~/go/src/net/cgo_unix.go"), +// not the names of the actual temporary files. +// +// The advantage of this approach is its fidelity to 'go build'. 
The +// downside is that the token.Position.Offset for each AST node is +// incorrect, being an offset within the temporary file. Line numbers +// should still be correct because of the //line comments. +// +// The logic of this file is mostly plundered from the 'go build' +// tool, which also invokes the cgo preprocessor. +// +// +// REJECTED ALTERNATIVE +// +// An alternative approach that we explored is to extend go/types' +// Importer mechanism to provide the identity of the importing package +// so that each time `import "C"` appears it resolves to a different +// synthetic package containing just the objects needed in that case. +// The loader would invoke cgo but parse only the cgo_types.go file +// defining the package-level objects, discarding the other files +// resulting from preprocessing. +// +// The benefit of this approach would have been that source-level +// syntax information would correspond exactly to the original cgo +// file, with no preprocessing involved, making source tools like +// godoc, guru, and eg happy. However, the approach was rejected +// due to the additional complexity it would impose on go/types. (It +// made for a beautiful demo, though.) +// +// cgo files, despite their *.go extension, are not legal Go source +// files per the specification since they may refer to unexported +// members of package "C" such as C.int. Also, a function such as +// C.getpwent has in effect two types, one matching its C type and one +// which additionally returns (errno C.int). The cgo preprocessor +// uses name mangling to distinguish these two functions in the +// processed code, but go/types would need to duplicate this logic in +// its handling of function calls, analogous to the treatment of map +// lookups in which y=m[k] and y,ok=m[k] are both legal. + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" +) + +// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses +// the output and returns the resulting ASTs. +// +func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) { + tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmpdir) + + pkgdir := bp.Dir + if DisplayPath != nil { + pkgdir = DisplayPath(pkgdir) + } + + cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false) + if err != nil { + return nil, err + } + var files []*ast.File + for i := range cgoFiles { + rd, err := os.Open(cgoFiles[i]) + if err != nil { + return nil, err + } + display := filepath.Join(bp.Dir, cgoDisplayFiles[i]) + f, err := parser.ParseFile(fset, display, rd, mode) + rd.Close() + if err != nil { + return nil, err + } + files = append(files, f) + } + return files, nil +} + +var cgoRe = regexp.MustCompile(`[/\\:]`) + +// Run invokes the cgo preprocessor on bp.CgoFiles and returns two +// lists of files: the resulting processed files (in temporary +// directory tmpdir) and the corresponding names of the unprocessed files. +// +// Run is adapted from (*builder).cgo in +// $GOROOT/src/cmd/go/build.go, but these features are unsupported: +// Objective C, CGOPKGPATH, CGO_FLAGS. +// +// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in +// to the cgo preprocessor. This in turn will set the // line comments +// referring to those files to use absolute paths. 
This is needed for +// go/packages using the legacy go list support so it is able to find +// the original files. +func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) { + cgoCPPFLAGS, _, _, _ := cflags(bp, true) + _, cgoexeCFLAGS, _, _ := cflags(bp, false) + + if len(bp.CgoPkgConfig) > 0 { + pcCFLAGS, err := pkgConfigFlags(bp) + if err != nil { + return nil, nil, err + } + cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) + } + + // Allows including _cgo_export.h from .[ch] files in the package. + cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir) + + // _cgo_gotypes.go (displayed "C") contains the type definitions. + files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go")) + displayFiles = append(displayFiles, "C") + for _, fn := range bp.CgoFiles { + // "foo.cgo1.go" (displayed "foo.go") is the processed Go source. + f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_") + files = append(files, filepath.Join(tmpdir, f+"cgo1.go")) + displayFiles = append(displayFiles, fn) + } + + var cgoflags []string + if bp.Goroot && bp.ImportPath == "runtime/cgo" { + cgoflags = append(cgoflags, "-import_runtime_cgo=false") + } + if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" { + cgoflags = append(cgoflags, "-import_syscall=false") + } + + var cgoFiles []string = bp.CgoFiles + if useabs { + cgoFiles = make([]string, len(bp.CgoFiles)) + for i := range cgoFiles { + cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i]) + } + } + + args := stringList( + "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--", + cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles, + ) + if false { + log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir) + } + cmd := exec.Command(args[0], args[1:]...) + cmd.Dir = pkgdir + cmd.Stdout = os.Stderr + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err) + } + + return files, displayFiles, nil +} + +// -- unmodified from 'go build' --------------------------------------- + +// Return the flags to use when invoking the C or C++ compilers, or cgo. +func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) { + var defaults string + if def { + defaults = "-g -O2" + } + + cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) + cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) + cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS) + ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS) + return +} + +// envList returns the value of the given environment variable broken +// into fields, using the default value when the variable is empty. +func envList(key, def string) []string { + v := os.Getenv(key) + if v == "" { + v = def + } + return strings.Fields(v) +} + +// stringList's arguments should be a sequence of string or []string values. +// stringList flattens them into a single []string. +func stringList(args ...interface{}) []string { + var x []string + for _, arg := range args { + switch arg := arg.(type) { + case []string: + x = append(x, arg...) 
+ case string: + x = append(x, arg) + default: + panic("stringList: invalid argument") + } + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go new file mode 100644 index 0000000000..b5bb95a63e --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go @@ -0,0 +1,39 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgo + +import ( + "errors" + "fmt" + "go/build" + "os/exec" + "strings" +) + +// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints. +func pkgConfig(mode string, pkgs []string) (flags []string, err error) { + cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...) + out, err := cmd.CombinedOutput() + if err != nil { + s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err) + if len(out) > 0 { + s = fmt.Sprintf("%s: %s", s, out) + } + return nil, errors.New(s) + } + if len(out) > 0 { + flags = strings.Fields(string(out)) + } + return +} + +// pkgConfigFlags calls pkg-config if needed and returns the cflags +// needed to build the package. +func pkgConfigFlags(p *build.Package) (cflags []string, err error) { + if len(p.CgoPkgConfig) == 0 { + return nil, nil + } + return pkgConfig("--cflags", p.CgoPkgConfig) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go index a807d0aaa2..9f65049140 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -127,10 +127,10 @@ func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) // --- generic export data --- // populate type map with predeclared "known" types - for index, typ := range predeclared() { + for index, typ := range predeclared { p.typIndex[typ] = index } - if len(p.typIndex) != len(predeclared()) { + if len(p.typIndex) != len(predeclared) { return nil, internalError("duplicate entries in type map?") } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go index e3c3107825..b31eacfc0f 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go @@ -126,7 +126,7 @@ func BImportData(fset *token.FileSet, imports map[string]*types.Package, data [] // --- generic export data --- // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared()...) + p.typList = append(p.typList, predeclared...) 
// read package data pkg = p.pkg() @@ -976,58 +976,50 @@ const ( aliasTag ) -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - if predecl == nil { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - } - return predecl +var predeclared = []types.Type{ + // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, } type anyType struct{} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go deleted file mode 100644 index be671c79b7..0000000000 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go +++ /dev/null @@ -1,723 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. - -// +build go1.11 - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "go/ast" - "go/constant" - "go/token" - "go/types" - "io" - "math/big" - "reflect" - "sort" -) - -// Current indexed export format version. Increase with each format change. 
-// 0: Go1.11 encoding -const iexportVersion = 0 - -// IExportData returns the binary export data for pkg. -// If no file set is provided, position info will be missing. -func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - - p := iexporter{ - out: bytes.NewBuffer(nil), - fset: fset, - allPkgs: map[*types.Package]bool{}, - stringIndex: map[string]uint64{}, - declIndex: map[types.Object]uint64{}, - typIndex: map[types.Type]uint64{}, - } - - for i, pt := range predeclared() { - p.typIndex[pt] = uint64(i) - } - if len(p.typIndex) > predeclReserved { - panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) - } - - // Initialize work queue with exported declarations. - scope := pkg.Scope() - for _, name := range scope.Names() { - if ast.IsExported(name) { - p.pushDecl(scope.Lookup(name)) - } - } - - // Loop until no more work. - for !p.declTodo.empty() { - p.doDecl(p.declTodo.popHead()) - } - - // Append indices to data0 section. - dataLen := uint64(p.data0.Len()) - w := p.newWriter() - w.writeIndex(p.declIndex, pkg) - w.flush() - - // Assemble header. - var hdr intWriter - hdr.WriteByte('i') - hdr.uint64(iexportVersion) - hdr.uint64(uint64(p.strings.Len())) - hdr.uint64(dataLen) - - // Flush output. - io.Copy(p.out, &hdr) - io.Copy(p.out, &p.strings) - io.Copy(p.out, &p.data0) - - return p.out.Bytes(), nil -} - -// writeIndex writes out an object index. mainIndex indicates whether -// we're writing out the main index, which is also read by -// non-compiler tools and includes a complete package description -// (i.e., name and height). -func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) { - // Build a map from packages to objects from that package. - pkgObjs := map[*types.Package][]types.Object{} - - // For the main index, make sure to include every package that - // we reference, even if we're not exporting (or reexporting) - // any symbols from it. - pkgObjs[localpkg] = nil - for pkg := range w.p.allPkgs { - pkgObjs[pkg] = nil - } - - for obj := range index { - pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj) - } - - var pkgs []*types.Package - for pkg, objs := range pkgObjs { - pkgs = append(pkgs, pkg) - - sort.Slice(objs, func(i, j int) bool { - return objs[i].Name() < objs[j].Name() - }) - } - - sort.Slice(pkgs, func(i, j int) bool { - return pkgs[i].Path() < pkgs[j].Path() - }) - - w.uint64(uint64(len(pkgs))) - for _, pkg := range pkgs { - w.string(pkg.Path()) - w.string(pkg.Name()) - w.uint64(uint64(0)) // package height is not needed for go/types - - objs := pkgObjs[pkg] - w.uint64(uint64(len(objs))) - for _, obj := range objs { - w.string(obj.Name()) - w.uint64(index[obj]) - } - } -} - -type iexporter struct { - fset *token.FileSet - out *bytes.Buffer - - // allPkgs tracks all packages that have been referenced by - // the export data, so we can ensure to include them in the - // main index. - allPkgs map[*types.Package]bool - - declTodo objQueue - - strings intWriter - stringIndex map[string]uint64 - - data0 intWriter - declIndex map[types.Object]uint64 - typIndex map[types.Type]uint64 -} - -// stringOff returns the offset of s within the string section. -// If not already present, it's added to the end. 
-func (p *iexporter) stringOff(s string) uint64 { - off, ok := p.stringIndex[s] - if !ok { - off = uint64(p.strings.Len()) - p.stringIndex[s] = off - - p.strings.uint64(uint64(len(s))) - p.strings.WriteString(s) - } - return off -} - -// pushDecl adds n to the declaration work queue, if not already present. -func (p *iexporter) pushDecl(obj types.Object) { - // Package unsafe is known to the compiler and predeclared. - assert(obj.Pkg() != types.Unsafe) - - if _, ok := p.declIndex[obj]; ok { - return - } - - p.declIndex[obj] = ^uint64(0) // mark n present in work queue - p.declTodo.pushTail(obj) -} - -// exportWriter handles writing out individual data section chunks. -type exportWriter struct { - p *iexporter - - data intWriter - currPkg *types.Package - prevFile string - prevLine int64 -} - -func (p *iexporter) doDecl(obj types.Object) { - w := p.newWriter() - w.setPkg(obj.Pkg(), false) - - switch obj := obj.(type) { - case *types.Var: - w.tag('V') - w.pos(obj.Pos()) - w.typ(obj.Type(), obj.Pkg()) - - case *types.Func: - sig, _ := obj.Type().(*types.Signature) - if sig.Recv() != nil { - panic(internalErrorf("unexpected method: %v", sig)) - } - w.tag('F') - w.pos(obj.Pos()) - w.signature(sig) - - case *types.Const: - w.tag('C') - w.pos(obj.Pos()) - w.value(obj.Type(), obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - w.tag('A') - w.pos(obj.Pos()) - w.typ(obj.Type(), obj.Pkg()) - break - } - - // Defined type. - w.tag('T') - w.pos(obj.Pos()) - - underlying := obj.Type().Underlying() - w.typ(underlying, obj.Pkg()) - - t := obj.Type() - if types.IsInterface(t) { - break - } - - named, ok := t.(*types.Named) - if !ok { - panic(internalErrorf("%s is not a defined type", t)) - } - - n := named.NumMethods() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - m := named.Method(i) - w.pos(m.Pos()) - w.string(m.Name()) - sig, _ := m.Type().(*types.Signature) - w.param(sig.Recv()) - w.signature(sig) - } - - default: - panic(internalErrorf("unexpected object: %v", obj)) - } - - p.declIndex[obj] = w.flush() -} - -func (w *exportWriter) tag(tag byte) { - w.data.WriteByte(tag) -} - -func (w *exportWriter) pos(pos token.Pos) { - p := w.p.fset.Position(pos) - file := p.Filename - line := int64(p.Line) - - // When file is the same as the last position (common case), - // we can save a few bytes by delta encoding just the line - // number. - // - // Note: Because data objects may be read out of order (or not - // at all), we can only apply delta encoding within a single - // object. This is handled implicitly by tracking prevFile and - // prevLine as fields of exportWriter. - - if file == w.prevFile { - delta := line - w.prevLine - w.int64(delta) - if delta == deltaNewFile { - w.int64(-1) - } - } else { - w.int64(deltaNewFile) - w.int64(line) // line >= 0 - w.string(file) - w.prevFile = file - } - w.prevLine = line -} - -func (w *exportWriter) pkg(pkg *types.Package) { - // Ensure any referenced packages are declared in the main index. - w.p.allPkgs[pkg] = true - - w.string(pkg.Path()) -} - -func (w *exportWriter) qualifiedIdent(obj types.Object) { - // Ensure any referenced declarations are written out too. 
- w.p.pushDecl(obj) - - w.string(obj.Name()) - w.pkg(obj.Pkg()) -} - -func (w *exportWriter) typ(t types.Type, pkg *types.Package) { - w.data.uint64(w.p.typOff(t, pkg)) -} - -func (p *iexporter) newWriter() *exportWriter { - return &exportWriter{p: p} -} - -func (w *exportWriter) flush() uint64 { - off := uint64(w.p.data0.Len()) - io.Copy(&w.p.data0, &w.data) - return off -} - -func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { - off, ok := p.typIndex[t] - if !ok { - w := p.newWriter() - w.doTyp(t, pkg) - off = predeclReserved + w.flush() - p.typIndex[t] = off - } - return off -} - -func (w *exportWriter) startType(k itag) { - w.data.uint64(uint64(k)) -} - -func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { - switch t := t.(type) { - case *types.Named: - w.startType(definedType) - w.qualifiedIdent(t.Obj()) - - case *types.Pointer: - w.startType(pointerType) - w.typ(t.Elem(), pkg) - - case *types.Slice: - w.startType(sliceType) - w.typ(t.Elem(), pkg) - - case *types.Array: - w.startType(arrayType) - w.uint64(uint64(t.Len())) - w.typ(t.Elem(), pkg) - - case *types.Chan: - w.startType(chanType) - // 1 RecvOnly; 2 SendOnly; 3 SendRecv - var dir uint64 - switch t.Dir() { - case types.RecvOnly: - dir = 1 - case types.SendOnly: - dir = 2 - case types.SendRecv: - dir = 3 - } - w.uint64(dir) - w.typ(t.Elem(), pkg) - - case *types.Map: - w.startType(mapType) - w.typ(t.Key(), pkg) - w.typ(t.Elem(), pkg) - - case *types.Signature: - w.startType(signatureType) - w.setPkg(pkg, true) - w.signature(t) - - case *types.Struct: - w.startType(structType) - w.setPkg(pkg, true) - - n := t.NumFields() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - f := t.Field(i) - w.pos(f.Pos()) - w.string(f.Name()) - w.typ(f.Type(), pkg) - w.bool(f.Embedded()) - w.string(t.Tag(i)) // note (or tag) - } - - case *types.Interface: - w.startType(interfaceType) - w.setPkg(pkg, true) - - n := t.NumEmbeddeds() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - f := t.Embedded(i) - w.pos(f.Obj().Pos()) - w.typ(f.Obj().Type(), f.Obj().Pkg()) - } - - n = t.NumExplicitMethods() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - m := t.ExplicitMethod(i) - w.pos(m.Pos()) - w.string(m.Name()) - sig, _ := m.Type().(*types.Signature) - w.signature(sig) - } - - default: - panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) - } -} - -func (w *exportWriter) setPkg(pkg *types.Package, write bool) { - if write { - w.pkg(pkg) - } - - w.currPkg = pkg -} - -func (w *exportWriter) signature(sig *types.Signature) { - w.paramList(sig.Params()) - w.paramList(sig.Results()) - if sig.Params().Len() > 0 { - w.bool(sig.Variadic()) - } -} - -func (w *exportWriter) paramList(tup *types.Tuple) { - n := tup.Len() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - w.param(tup.At(i)) - } -} - -func (w *exportWriter) param(obj types.Object) { - w.pos(obj.Pos()) - w.localIdent(obj) - w.typ(obj.Type(), obj.Pkg()) -} - -func (w *exportWriter) value(typ types.Type, v constant.Value) { - w.typ(typ, nil) - - switch v.Kind() { - case constant.Bool: - w.bool(constant.BoolVal(v)) - case constant.Int: - var i big.Int - if i64, exact := constant.Int64Val(v); exact { - i.SetInt64(i64) - } else if ui64, exact := constant.Uint64Val(v); exact { - i.SetUint64(ui64) - } else { - i.SetString(v.ExactString(), 10) - } - w.mpint(&i, typ) - case constant.Float: - f := constantToFloat(v) - w.mpfloat(f, typ) - case constant.Complex: - w.mpfloat(constantToFloat(constant.Real(v)), typ) - w.mpfloat(constantToFloat(constant.Imag(v)), 
typ) - case constant.String: - w.string(constant.StringVal(v)) - case constant.Unknown: - // package contains type errors - default: - panic(internalErrorf("unexpected value %v (%T)", v, v)) - } -} - -// constantToFloat converts a constant.Value with kind constant.Float to a -// big.Float. -func constantToFloat(x constant.Value) *big.Float { - assert(x.Kind() == constant.Float) - // Use the same floating-point precision (512) as cmd/compile - // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). - const mpprec = 512 - var f big.Float - f.SetPrec(mpprec) - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - n := valueToRat(num) - d := valueToRat(denom) - f.SetRat(n.Quo(n, d)) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - _, ok := f.SetString(x.ExactString()) - assert(ok) - } - return &f -} - -// mpint exports a multi-precision integer. -// -// For unsigned types, small values are written out as a single -// byte. Larger values are written out as a length-prefixed big-endian -// byte string, where the length prefix is encoded as its complement. -// For example, bytes 0, 1, and 2 directly represent the integer -// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, -// 2-, and 3-byte big-endian string follow. -// -// Encoding for signed types use the same general approach as for -// unsigned types, except small values use zig-zag encoding and the -// bottom bit of length prefix byte for large values is reserved as a -// sign bit. -// -// The exact boundary between small and large encodings varies -// according to the maximum number of bytes needed to encode a value -// of type typ. As a special case, 8-bit types are always encoded as a -// single byte. -// -// TODO(mdempsky): Is this level of complexity really worthwhile? -func (w *exportWriter) mpint(x *big.Int, typ types.Type) { - basic, ok := typ.Underlying().(*types.Basic) - if !ok { - panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) - } - - signed, maxBytes := intSize(basic) - - negative := x.Sign() < 0 - if !signed && negative { - panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) - } - - b := x.Bytes() - if len(b) > 0 && b[0] == 0 { - panic(internalErrorf("leading zeros")) - } - if uint(len(b)) > maxBytes { - panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) - } - - maxSmall := 256 - maxBytes - if signed { - maxSmall = 256 - 2*maxBytes - } - if maxBytes == 1 { - maxSmall = 256 - } - - // Check if x can use small value encoding. - if len(b) <= 1 { - var ux uint - if len(b) == 1 { - ux = uint(b[0]) - } - if signed { - ux <<= 1 - if negative { - ux-- - } - } - if ux < maxSmall { - w.data.WriteByte(byte(ux)) - return - } - } - - n := 256 - uint(len(b)) - if signed { - n = 256 - 2*uint(len(b)) - if negative { - n |= 1 - } - } - if n < maxSmall || n >= 256 { - panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) - } - - w.data.WriteByte(byte(n)) - w.data.Write(b) -} - -// mpfloat exports a multi-precision floating point number. -// -// The number's value is decomposed into mantissa × 2**exponent, where -// mantissa is an integer. 
The value is written out as mantissa (as a -// multi-precision integer) and then the exponent, except exponent is -// omitted if mantissa is zero. -func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { - if f.IsInf() { - panic("infinite constant") - } - - // Break into f = mant × 2**exp, with 0.5 <= mant < 1. - var mant big.Float - exp := int64(f.MantExp(&mant)) - - // Scale so that mant is an integer. - prec := mant.MinPrec() - mant.SetMantExp(&mant, int(prec)) - exp -= int64(prec) - - manti, acc := mant.Int(nil) - if acc != big.Exact { - panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) - } - w.mpint(manti, typ) - if manti.Sign() != 0 { - w.int64(exp) - } -} - -func (w *exportWriter) bool(b bool) bool { - var x uint64 - if b { - x = 1 - } - w.uint64(x) - return b -} - -func (w *exportWriter) int64(x int64) { w.data.int64(x) } -func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } -func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } - -func (w *exportWriter) localIdent(obj types.Object) { - // Anonymous parameters. - if obj == nil { - w.string("") - return - } - - name := obj.Name() - if name == "_" { - w.string("_") - return - } - - w.string(name) -} - -type intWriter struct { - bytes.Buffer -} - -func (w *intWriter) int64(x int64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutVarint(buf[:], x) - w.Write(buf[:n]) -} - -func (w *intWriter) uint64(x uint64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutUvarint(buf[:], x) - w.Write(buf[:n]) -} - -func assert(cond bool) { - if !cond { - panic("internal error: assertion failed") - } -} - -// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. - -// objQueue is a FIFO queue of types.Object. The zero value of objQueue is -// a ready-to-use empty queue. -type objQueue struct { - ring []types.Object - head, tail int -} - -// empty returns true if q contains no Nodes. -func (q *objQueue) empty() bool { - return q.head == q.tail -} - -// pushTail appends n to the tail of the queue. -func (q *objQueue) pushTail(obj types.Object) { - if len(q.ring) == 0 { - q.ring = make([]types.Object, 16) - } else if q.head+len(q.ring) == q.tail { - // Grow the ring. - nring := make([]types.Object, len(q.ring)*2) - // Copy the old elements. - part := q.ring[q.head%len(q.ring):] - if q.tail-q.head <= len(part) { - part = part[:q.tail-q.head] - copy(nring, part) - } else { - pos := copy(nring, part) - copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) - } - q.ring, q.head, q.tail = nring, 0, q.tail-q.head - } - - q.ring[q.tail%len(q.ring)] = obj - q.tail++ -} - -// popHead pops a node from the head of the queue. It panics if q is empty. 
-func (q *objQueue) popHead() types.Object { - if q.empty() { - panic("dequeue empty") - } - obj := q.ring[q.head%len(q.ring)] - q.head++ - return obj -} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go index 3cb7ae5b9e..0fd22bb038 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -109,7 +109,7 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] }, } - for i, pt := range predeclared() { + for i, pt := range predeclared { p.typCache[uint64(i)] = pt } @@ -142,12 +142,8 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] p.pkgIndex[pkg] = nameIndex pkgList[i] = pkg } - var localpkg *types.Package - for _, pkg := range pkgList { - if pkg.Path() == path { - localpkg = pkg - } - } + + localpkg := pkgList[0] names := make([]string, 0, len(p.pkgIndex[localpkg])) for name := range p.pkgIndex[localpkg] { @@ -334,10 +330,6 @@ func (r *importReader) value() (typ types.Type, val constant.Value) { val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) default: - if b.Kind() == types.Invalid { - val = constant.MakeUnknown() - return - } errorf("unexpected type %v", typ) // panics panic("unreachable") } diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 3799f8ed8b..0ec0fab24b 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -14,7 +14,7 @@ but all patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. -Two query operators are currently supported: "file" and "pattern". +Three query operators are currently supported: "file", "pattern", and "name". The query "file=path/to/file.go" matches the package or packages enclosing the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" @@ -25,6 +25,10 @@ the underlying build tool. In most cases this is unnecessary, but an application can use Load("pattern=" + x) as an escaping mechanism to ensure that x is not interpreted as a query operator if it contains '='. +The query "name=identifier" matches packages whose package declaration contains +the specified identifier. For example, "name=rand" would match the packages +"math/rand" and "crypto/rand", and "name=main" would match all executables. + All other query operators are reserved for future use and currently cause Load to report an error. 
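The doc.go and golist.go hunks above enable the "name=" query operator in this pinned copy of golang.org/x/tools/go/packages. What follows is a minimal sketch of driving that operator through the package's public API, assuming the exported names of this vendored version (packages.Config, packages.LoadImports, packages.Load, and the Package ID/Name fields); the main wrapper and the printed output are only illustrative:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// "name=rand" matches packages whose package declaration is "package rand",
	// e.g. math/rand and crypto/rand, per the doc.go change above.
	cfg := &packages.Config{Mode: packages.LoadImports}
	pkgs, err := packages.Load(cfg, "name=rand")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, p.Name)
	}
}

As the doc.go text notes, "name=main" would likewise match every executable package visible to the underlying go list invocation.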
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 22ff769ef2..860c3ec156 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -18,7 +18,7 @@ import ( // Driver type driverRequest struct { - Command string `json:"command"` + Command string `json "command"` Mode LoadMode `json:"mode"` Env []string `json:"env"` BuildFlags []string `json:"build_flags"` diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 72c0c5d632..7ca5ebe459 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -78,7 +78,7 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { var sizes types.Sizes var sizeserr error var sizeswg sync.WaitGroup - if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode >= LoadTypes { sizeswg.Add(1) go func() { sizes, sizeserr = getSizes(cfg) @@ -104,7 +104,7 @@ extractQueries: containFiles = append(containFiles, value) case "pattern": restPatterns = append(restPatterns, value) - case "iamashamedtousethedisabledqueryname": + case "name": packagesNamed = append(packagesNamed, value) case "": // not a reserved query restPatterns = append(restPatterns, pattern) @@ -121,6 +121,20 @@ extractQueries: } } + // TODO(matloob): Remove the definition of listfunc and just use golistPackages once go1.12 is released. + var listfunc driver + var isFallback bool + listfunc = func(cfg *Config, words ...string) (*driverResponse, error) { + response, err := golistDriverCurrent(cfg, words...) + if _, ok := err.(goTooOldError); ok { + isFallback = true + listfunc = golistDriverFallback + return listfunc(cfg, words...) + } + listfunc = golistDriverCurrent + return response, err + } + response := &responseDeduper{} var err error @@ -128,7 +142,7 @@ extractQueries: // patterns also requires a go list call, since it's the equivalent of // ".". if len(restPatterns) > 0 || len(patterns) == 0 { - dr, err := golistDriver(cfg, restPatterns...) + dr, err := listfunc(cfg, restPatterns...) if err != nil { return nil, err } @@ -147,13 +161,13 @@ extractQueries: var containsCandidates []string if len(containFiles) != 0 { - if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil { + if err := runContainsQueries(cfg, listfunc, isFallback, response, containFiles); err != nil { return nil, err } } if len(packagesNamed) != 0 { - if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { + if err := runNamedQueries(cfg, listfunc, response, packagesNamed); err != nil { return nil, err } } @@ -166,25 +180,17 @@ extractQueries: containsCandidates = append(containsCandidates, modifiedPkgs...) containsCandidates = append(containsCandidates, needPkgs...) } - if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs); err != nil { - return nil, err + + if len(needPkgs) > 0 { + addNeededOverlayPackages(cfg, listfunc, response, needPkgs) + if err != nil { + return nil, err + } } // Check candidate packages for containFiles. 
if len(containFiles) > 0 { for _, id := range containsCandidates { - pkg, ok := response.seenPackages[id] - if !ok { - response.addPackage(&Package{ - ID: id, - Errors: []Error{ - { - Kind: ListError, - Msg: fmt.Sprintf("package %s expected but not seen", id), - }, - }, - }) - continue - } + pkg := response.seenPackages[id] for _, f := range containFiles { for _, g := range pkg.GoFiles { if sameFile(f, g) { @@ -199,9 +205,6 @@ extractQueries: } func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error { - if len(pkgs) == 0 { - return nil - } dr, err := driver(cfg, pkgs...) if err != nil { return err @@ -209,15 +212,10 @@ func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDedu for _, pkg := range dr.Packages { response.addPackage(pkg) } - _, needPkgs, err := processGolistOverlay(cfg, response.dr) - if err != nil { - return err - } - addNeededOverlayPackages(cfg, driver, response, needPkgs) return nil } -func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { +func runContainsQueries(cfg *Config, driver driver, isFallback bool, response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. fdir := filepath.Dir(query) @@ -227,6 +225,11 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q if err != nil { return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) } + if isFallback { + pattern = "." + cfg.Dir = fdir + } + dirResponse, err := driver(cfg, pattern) if err != nil { return err @@ -556,10 +559,10 @@ func otherFiles(p *jsonPackage) [][]string { return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} } -// golistDriver uses the "go list" command to expand the pattern -// words and return metadata for the specified packages. dir may be -// "" and env may be nil, as per os/exec.Command. -func golistDriver(cfg *Config, words ...string) (*driverResponse, error) { +// golistDriverCurrent uses the "go list" command to expand the +// pattern words and return metadata for the specified packages. +// dir may be "" and env may be nil, as per os/exec.Command. +func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -602,7 +605,7 @@ func golistDriver(cfg *Config, words ...string) (*driverResponse, error) { if old, found := seen[p.ImportPath]; found { if !reflect.DeepEqual(p, old) { - return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + return nil, fmt.Errorf("go list repeated package %v with different values", p.ImportPath) } // skip the duplicate continue @@ -617,25 +620,16 @@ func golistDriver(cfg *Config, words ...string) (*driverResponse, error) { OtherFiles: absJoin(p.Dir, otherFiles(p)...), } - // Work around https://golang.org/issue/28749: - // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. - // Filter out any elements of CompiledGoFiles that are also in OtherFiles. - // We have to keep this workaround in place until go1.12 is a distant memory. 
- if len(pkg.OtherFiles) > 0 { - other := make(map[string]bool, len(pkg.OtherFiles)) - for _, f := range pkg.OtherFiles { - other[f] = true - } - - out := pkg.CompiledGoFiles[:0] - for _, f := range pkg.CompiledGoFiles { - if other[f] { - continue - } - out = append(out, f) + // Workaround for https://golang.org/issue/28749. + // TODO(adonovan): delete before go1.12 release. + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if strings.HasSuffix(f, ".s") { + continue } - pkg.CompiledGoFiles = out + out = append(out, f) } + pkg.CompiledGoFiles = out // Extract the PkgPath from the package's ID. if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { @@ -717,16 +711,14 @@ func absJoin(dir string, fileses ...[]string) (res []string) { } func golistargs(cfg *Config, words []string) []string { - const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo fullargs := []string{ - "list", "-e", "-json", - fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), + "list", "-e", "-json", "-compiled", fmt.Sprintf("-test=%t", cfg.Tests), fmt.Sprintf("-export=%t", usesExportData(cfg)), - fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0), + fmt.Sprintf("-deps=%t", cfg.Mode >= LoadImports), // go list doesn't let you pass -test and -find together, // probably because you'd just get the TestMain. - fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), + fmt.Sprintf("-find=%t", cfg.Mode < LoadImports && !cfg.Tests), } fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") @@ -736,6 +728,9 @@ func golistargs(cfg *Config, words []string) []string { // invokeGo returns the stdout of a go command invocation. func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { + if debug { + defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(cfg, args...)) }(time.Now()) + } stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) cmd := exec.CommandContext(cfg.Context, "go", args...) @@ -749,21 +744,11 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { cmd.Dir = cfg.Dir cmd.Stdout = stdout cmd.Stderr = stderr - if debug { - defer func(start time.Time) { - log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr) - }(time.Now()) - } - if err := cmd.Run(); err != nil { - // Check for 'go' executable not being found. - if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { - return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) - } - exitErr, ok := err.(*exec.ExitError) if !ok { // Catastrophic error: + // - executable not found // - context cancellation return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) } @@ -773,38 +758,6 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} } - // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show - // the error in the Err section of stdout in case -e option is provided. - // This fix is provided for backwards compatibility. 
- if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { - output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { - output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit - // status if there's a dependency on a package that doesn't exist. But it should return - // a zero exit status and set an error on that package. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { - // try to extract package name from string - stderrStr := stderr.String() - var importPath string - colon := strings.Index(stderrStr, ":") - if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { - importPath = stderrStr[len("go build "):colon] - } - output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - importPath, strings.Trim(stderrStr, "\n")) - return bytes.NewBufferString(output), nil - } - // Export mode entails a build. // If that build fails, errors appear on stderr // (despite the -e flag) and the Export field is blank. @@ -824,12 +777,12 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS // is set. if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cfg, args...), stderr) } // debugging if false { - fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout) + fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cfg, args...), stdout) } return stdout, nil @@ -844,17 +797,13 @@ func containsGoFile(s []string) bool { return false } -func cmdDebugStr(cmd *exec.Cmd, args ...string) string { +func cmdDebugStr(cfg *Config, args ...string) string { env := make(map[string]string) - for _, kv := range cmd.Env { + for _, kv := range cfg.Env { split := strings.Split(kv, "=") k, v := split[0], split[1] env[k] = v } - var quotedArgs []string - for _, arg := range args { - quotedArgs = append(quotedArgs, strconv.Quote(arg)) - } - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " ")) + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args) } diff --git a/vendor/golang.org/x/tools/go/packages/golist_fallback.go b/vendor/golang.org/x/tools/go/packages/golist_fallback.go new file mode 100644 index 0000000000..141fa19ac1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_fallback.go @@ -0,0 +1,450 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packages + +import ( + "encoding/json" + "fmt" + "go/build" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + + "golang.org/x/tools/go/internal/cgo" +) + +// TODO(matloob): Delete this file once Go 1.12 is released. + +// This file provides backwards compatibility support for +// loading for versions of Go earlier than 1.11. This support is meant to +// assist with migration to the Package API until there's +// widespread adoption of these newer Go versions. +// This support will be removed once Go 1.12 is released +// in Q1 2019. + +func golistDriverFallback(cfg *Config, words ...string) (*driverResponse, error) { + // Turn absolute paths into GOROOT and GOPATH-relative paths to provide to go list. + // This will have surprising behavior if GOROOT or GOPATH contain multiple packages with the same + // path and a user provides an absolute path to a directory that's shadowed by an earlier + // directory in GOROOT or GOPATH with the same package path. + words = cleanAbsPaths(cfg, words) + + original, deps, err := getDeps(cfg, words...) + if err != nil { + return nil, err + } + + var tmpdir string // used for generated cgo files + var needsTestVariant []struct { + pkg, xtestPkg *Package + } + + var response driverResponse + allPkgs := make(map[string]bool) + addPackage := func(p *jsonPackage, isRoot bool) { + id := p.ImportPath + + if allPkgs[id] { + return + } + allPkgs[id] = true + + pkgpath := id + + if pkgpath == "unsafe" { + p.GoFiles = nil // ignore fake unsafe.go file + } + + importMap := func(importlist []string) map[string]*Package { + importMap := make(map[string]*Package) + for _, id := range importlist { + + if id == "C" { + for _, path := range []string{"unsafe", "syscall", "runtime/cgo"} { + if pkgpath != path && importMap[path] == nil { + importMap[path] = &Package{ID: path} + } + } + continue + } + importMap[vendorlessPath(id)] = &Package{ID: id} + } + return importMap + } + compiledGoFiles := absJoin(p.Dir, p.GoFiles) + // Use a function to simplify control flow. It's just a bunch of gotos. + var cgoErrors []error + var outdir string + getOutdir := func() (string, error) { + if outdir != "" { + return outdir, nil + } + if tmpdir == "" { + if tmpdir, err = ioutil.TempDir("", "gopackages"); err != nil { + return "", err + } + } + outdir = filepath.Join(tmpdir, strings.Replace(p.ImportPath, "/", "_", -1)) + if err := os.MkdirAll(outdir, 0755); err != nil { + outdir = "" + return "", err + } + return outdir, nil + } + processCgo := func() bool { + // Suppress any cgo errors. Any relevant errors will show up in typechecking. + // TODO(matloob): Skip running cgo if Mode < LoadTypes. + outdir, err := getOutdir() + if err != nil { + cgoErrors = append(cgoErrors, err) + return false + } + files, _, err := runCgo(p.Dir, outdir, cfg.Env) + if err != nil { + cgoErrors = append(cgoErrors, err) + return false + } + compiledGoFiles = append(compiledGoFiles, files...) + return true + } + if len(p.CgoFiles) == 0 || !processCgo() { + compiledGoFiles = append(compiledGoFiles, absJoin(p.Dir, p.CgoFiles)...) // Punt to typechecker. 
+ } + if isRoot { + response.Roots = append(response.Roots, id) + } + pkg := &Package{ + ID: id, + Name: p.Name, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: compiledGoFiles, + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + PkgPath: pkgpath, + Imports: importMap(p.Imports), + // TODO(matloob): set errors on the Package to cgoErrors + } + if p.Error != nil { + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + }) + } + response.Packages = append(response.Packages, pkg) + if cfg.Tests && isRoot { + testID := fmt.Sprintf("%s [%s.test]", id, id) + if len(p.TestGoFiles) > 0 || len(p.XTestGoFiles) > 0 { + response.Roots = append(response.Roots, testID) + testPkg := &Package{ + ID: testID, + Name: p.Name, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles, p.TestGoFiles), + CompiledGoFiles: append(compiledGoFiles, absJoin(p.Dir, p.TestGoFiles)...), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + PkgPath: pkgpath, + Imports: importMap(append(p.Imports, p.TestImports...)), + // TODO(matloob): set errors on the Package to cgoErrors + } + response.Packages = append(response.Packages, testPkg) + var xtestPkg *Package + if len(p.XTestGoFiles) > 0 { + xtestID := fmt.Sprintf("%s_test [%s.test]", id, id) + response.Roots = append(response.Roots, xtestID) + // Generate test variants for all packages q where a path exists + // such that xtestPkg -> ... -> q -> ... -> p (where p is the package under test) + // and rewrite all import map entries of p to point to testPkg (the test variant of + // p), and of each q to point to the test variant of that q. + xtestPkg = &Package{ + ID: xtestID, + Name: p.Name + "_test", + GoFiles: absJoin(p.Dir, p.XTestGoFiles), + CompiledGoFiles: absJoin(p.Dir, p.XTestGoFiles), + PkgPath: pkgpath + "_test", + Imports: importMap(p.XTestImports), + } + // Add to list of packages we need to rewrite imports for to refer to test variants. + // We may need to create a test variant of a package that hasn't been loaded yet, so + // the test variants need to be created later. + needsTestVariant = append(needsTestVariant, struct{ pkg, xtestPkg *Package }{pkg, xtestPkg}) + response.Packages = append(response.Packages, xtestPkg) + } + // testmain package + testmainID := id + ".test" + response.Roots = append(response.Roots, testmainID) + imports := map[string]*Package{} + imports[testPkg.PkgPath] = &Package{ID: testPkg.ID} + if xtestPkg != nil { + imports[xtestPkg.PkgPath] = &Package{ID: xtestPkg.ID} + } + testmainPkg := &Package{ + ID: testmainID, + Name: "main", + PkgPath: testmainID, + Imports: imports, + } + response.Packages = append(response.Packages, testmainPkg) + outdir, err := getOutdir() + if err != nil { + testmainPkg.Errors = append(testmainPkg.Errors, Error{ + Pos: "-", + Msg: fmt.Sprintf("failed to generate testmain: %v", err), + Kind: ListError, + }) + return + } + // Don't use a .go extension on the file, so that the tests think the file is inside GOCACHE. + // This allows the same test to test the pre- and post-Go 1.11 go list logic because the Go 1.11 + // go list generates test mains in the cache, and the test code knows not to rely on paths in the + // cache to stay stable. + testmain := filepath.Join(outdir, "testmain-go") + extraimports, extradeps, err := generateTestmain(testmain, testPkg, xtestPkg) + if err != nil { + testmainPkg.Errors = append(testmainPkg.Errors, Error{ + Pos: "-", + Msg: fmt.Sprintf("failed to generate testmain: %v", err), + Kind: ListError, + }) + } + deps = append(deps, extradeps...) 
+ for _, imp := range extraimports { // testing, testing/internal/testdeps, and maybe os + imports[imp] = &Package{ID: imp} + } + testmainPkg.GoFiles = []string{testmain} + testmainPkg.CompiledGoFiles = []string{testmain} + } + } + } + + for _, pkg := range original { + addPackage(pkg, true) + } + if cfg.Mode < LoadImports || len(deps) == 0 { + return &response, nil + } + + buf, err := invokeGo(cfg, golistArgsFallback(cfg, deps)...) + if err != nil { + return nil, err + } + + // Decode the JSON and convert it to Package form. + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + addPackage(p, false) + } + + for _, v := range needsTestVariant { + createTestVariants(&response, v.pkg, v.xtestPkg) + } + + return &response, nil +} + +func createTestVariants(response *driverResponse, pkgUnderTest, xtestPkg *Package) { + allPkgs := make(map[string]*Package) + for _, pkg := range response.Packages { + allPkgs[pkg.ID] = pkg + } + needsTestVariant := make(map[string]bool) + needsTestVariant[pkgUnderTest.ID] = true + var needsVariantRec func(p *Package) bool + needsVariantRec = func(p *Package) bool { + if needsTestVariant[p.ID] { + return true + } + for _, imp := range p.Imports { + if needsVariantRec(allPkgs[imp.ID]) { + // Don't break because we want to make sure all dependencies + // have been processed, and all required test variants of our dependencies + // exist. + needsTestVariant[p.ID] = true + } + } + if !needsTestVariant[p.ID] { + return false + } + // Create a clone of the package. It will share the same strings and lists of source files, + // but that's okay. It's only necessary for the Imports map to have a separate identity. + testVariant := *p + testVariant.ID = fmt.Sprintf("%s [%s.test]", p.ID, pkgUnderTest.ID) + testVariant.Imports = make(map[string]*Package) + for imp, pkg := range p.Imports { + testVariant.Imports[imp] = pkg + if needsTestVariant[pkg.ID] { + testVariant.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)} + } + } + response.Packages = append(response.Packages, &testVariant) + return needsTestVariant[p.ID] + } + // finally, update the xtest package's imports + for imp, pkg := range xtestPkg.Imports { + if allPkgs[pkg.ID] == nil { + fmt.Printf("for %s: package %s doesn't exist\n", xtestPkg.ID, pkg.ID) + } + if needsVariantRec(allPkgs[pkg.ID]) { + xtestPkg.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)} + } + } +} + +// cleanAbsPaths replaces all absolute paths with GOPATH- and GOROOT-relative +// paths. If an absolute path is not GOPATH- or GOROOT- relative, it is left as an +// absolute path so an error can be returned later. +func cleanAbsPaths(cfg *Config, words []string) []string { + var searchpaths []string + var cleaned = make([]string, len(words)) + for i := range cleaned { + cleaned[i] = words[i] + // Ignore relative directory paths (they must already be goroot-relative) and Go source files + // (absolute source files are already allowed for ad-hoc packages). + // TODO(matloob): Can there be non-.go files in ad-hoc packages. + if !filepath.IsAbs(cleaned[i]) || strings.HasSuffix(cleaned[i], ".go") { + continue + } + // otherwise, it's an absolute path. Search GOPATH and GOROOT to find it. 
+ if searchpaths == nil { + cmd := exec.Command("go", "env", "GOPATH", "GOROOT") + cmd.Env = cfg.Env + out, err := cmd.Output() + if err != nil { + searchpaths = []string{} + continue // suppress the error, it will show up again when running go list + } + lines := strings.Split(string(out), "\n") + if len(lines) != 3 || lines[0] == "" || lines[1] == "" || lines[2] != "" { + continue // suppress error + } + // first line is GOPATH + for _, path := range filepath.SplitList(lines[0]) { + searchpaths = append(searchpaths, filepath.Join(path, "src")) + } + // second line is GOROOT + searchpaths = append(searchpaths, filepath.Join(lines[1], "src")) + } + for _, sp := range searchpaths { + if strings.HasPrefix(cleaned[i], sp) { + cleaned[i] = strings.TrimPrefix(cleaned[i], sp) + cleaned[i] = strings.TrimLeft(cleaned[i], string(filepath.Separator)) + } + } + } + return cleaned +} + +// vendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +// Copied from golang.org/x/tools/imports/fix.go. +func vendorlessPath(ipath string) string { + // Devendorize for use in import statement. + if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + if strings.HasPrefix(ipath, "vendor/") { + return ipath[len("vendor/"):] + } + return ipath +} + +// getDeps runs an initial go list to determine all the dependency packages. +func getDeps(cfg *Config, words ...string) (initial []*jsonPackage, deps []string, err error) { + buf, err := invokeGo(cfg, golistArgsFallback(cfg, words)...) + if err != nil { + return nil, nil, err + } + + depsSet := make(map[string]bool) + var testImports []string + + // Extract deps from the JSON. + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + initial = append(initial, p) + for _, dep := range p.Deps { + depsSet[dep] = true + } + if cfg.Tests { + // collect the additional imports of the test packages. + pkgTestImports := append(p.TestImports, p.XTestImports...) + for _, imp := range pkgTestImports { + if depsSet[imp] { + continue + } + depsSet[imp] = true + testImports = append(testImports, imp) + } + } + } + // Get the deps of the packages imported by tests. + if len(testImports) > 0 { + buf, err = invokeGo(cfg, golistArgsFallback(cfg, testImports)...) + if err != nil { + return nil, nil, err + } + // Extract deps from the JSON. + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, nil, fmt.Errorf("JSON decoding failed: %v", err) + } + for _, dep := range p.Deps { + depsSet[dep] = true + } + } + } + + for _, orig := range initial { + delete(depsSet, orig.ImportPath) + } + + deps = make([]string, 0, len(depsSet)) + for dep := range depsSet { + deps = append(deps, dep) + } + sort.Strings(deps) // ensure output is deterministic + return initial, deps, nil +} + +func golistArgsFallback(cfg *Config, words []string) []string { + fullargs := []string{"list", "-e", "-json"} + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +func runCgo(pkgdir, tmpdir string, env []string) (files, displayfiles []string, err error) { + // Use go/build to open cgo files and determine the cgo flags, etc, from them. 
+ // This is tricky so it's best to avoid reimplementing as much as we can, and + // we plan to delete this support once Go 1.12 is released anyways. + // TODO(matloob): This isn't completely correct because we're using the Default + // context. Perhaps we should more accurately fill in the context. + bp, err := build.ImportDir(pkgdir, build.ImportMode(0)) + if err != nil { + return nil, nil, err + } + for _, ev := range env { + if v := strings.TrimPrefix(ev, "CGO_CPPFLAGS"); v != ev { + bp.CgoCPPFLAGS = append(bp.CgoCPPFLAGS, strings.Fields(v)...) + } else if v := strings.TrimPrefix(ev, "CGO_CFLAGS"); v != ev { + bp.CgoCFLAGS = append(bp.CgoCFLAGS, strings.Fields(v)...) + } else if v := strings.TrimPrefix(ev, "CGO_CXXFLAGS"); v != ev { + bp.CgoCXXFLAGS = append(bp.CgoCXXFLAGS, strings.Fields(v)...) + } else if v := strings.TrimPrefix(ev, "CGO_LDFLAGS"); v != ev { + bp.CgoLDFLAGS = append(bp.CgoLDFLAGS, strings.Fields(v)...) + } + } + return cgo.Run(bp, pkgdir, tmpdir, true) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go b/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go new file mode 100644 index 0000000000..128e00e25a --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go @@ -0,0 +1,318 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is largely based on the Go 1.10-era cmd/go/internal/test/test.go +// testmain generation code. + +package packages + +import ( + "errors" + "fmt" + "go/ast" + "go/doc" + "go/parser" + "go/token" + "os" + "sort" + "strings" + "text/template" + "unicode" + "unicode/utf8" +) + +// TODO(matloob): Delete this file once Go 1.12 is released. + +// This file complements golist_fallback.go by providing +// support for generating testmains. + +func generateTestmain(out string, testPkg, xtestPkg *Package) (extraimports, extradeps []string, err error) { + testFuncs, err := loadTestFuncs(testPkg, xtestPkg) + if err != nil { + return nil, nil, err + } + extraimports = []string{"testing", "testing/internal/testdeps"} + if testFuncs.TestMain == nil { + extraimports = append(extraimports, "os") + } + // Transitive dependencies of ("testing", "testing/internal/testdeps"). + // os is part of the transitive closure so it and its transitive dependencies are + // included regardless of whether it's imported in the template below. + extradeps = []string{ + "errors", + "internal/cpu", + "unsafe", + "internal/bytealg", + "internal/race", + "runtime/internal/atomic", + "runtime/internal/sys", + "runtime", + "sync/atomic", + "sync", + "io", + "unicode", + "unicode/utf8", + "bytes", + "math", + "syscall", + "time", + "internal/poll", + "internal/syscall/unix", + "internal/testlog", + "os", + "math/bits", + "strconv", + "reflect", + "fmt", + "sort", + "strings", + "flag", + "runtime/debug", + "context", + "runtime/trace", + "testing", + "bufio", + "regexp/syntax", + "regexp", + "compress/flate", + "encoding/binary", + "hash", + "hash/crc32", + "compress/gzip", + "path/filepath", + "io/ioutil", + "text/tabwriter", + "runtime/pprof", + "testing/internal/testdeps", + } + return extraimports, extradeps, writeTestmain(out, testFuncs) +} + +// The following is adapted from the cmd/go testmain generation code. + +// isTestFunc tells whether fn has the type of a testing function. arg +// specifies the parameter type we look for: B, M or T. 
+func isTestFunc(fn *ast.FuncDecl, arg string) bool { + if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 || + fn.Type.Params.List == nil || + len(fn.Type.Params.List) != 1 || + len(fn.Type.Params.List[0].Names) > 1 { + return false + } + ptr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr) + if !ok { + return false + } + // We can't easily check that the type is *testing.M + // because we don't know how testing has been imported, + // but at least check that it's *M or *something.M. + // Same applies for B and T. + if name, ok := ptr.X.(*ast.Ident); ok && name.Name == arg { + return true + } + if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == arg { + return true + } + return false +} + +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} + +// loadTestFuncs returns the testFuncs describing the tests that will be run. +func loadTestFuncs(ptest, pxtest *Package) (*testFuncs, error) { + t := &testFuncs{ + TestPackage: ptest, + XTestPackage: pxtest, + } + for _, file := range ptest.GoFiles { + if !strings.HasSuffix(file, "_test.go") { + continue + } + if err := t.load(file, "_test", &t.ImportTest, &t.NeedTest); err != nil { + return nil, err + } + } + if pxtest != nil { + for _, file := range pxtest.GoFiles { + if err := t.load(file, "_xtest", &t.ImportXtest, &t.NeedXtest); err != nil { + return nil, err + } + } + } + return t, nil +} + +// writeTestmain writes the _testmain.go file for t to the file named out. +func writeTestmain(out string, t *testFuncs) error { + f, err := os.Create(out) + if err != nil { + return err + } + defer f.Close() + + if err := testmainTmpl.Execute(f, t); err != nil { + return err + } + + return nil +} + +type testFuncs struct { + Tests []testFunc + Benchmarks []testFunc + Examples []testFunc + TestMain *testFunc + TestPackage *Package + XTestPackage *Package + ImportTest bool + NeedTest bool + ImportXtest bool + NeedXtest bool +} + +// Tested returns the name of the package being tested. +func (t *testFuncs) Tested() string { + return t.TestPackage.Name +} + +type testFunc struct { + Package string // imported package name (_test or _xtest) + Name string // function name + Output string // output, for examples + Unordered bool // output is allowed to be unordered. 
+} + +func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error { + var fset = token.NewFileSet() + + f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments) + if err != nil { + return errors.New("failed to parse test file " + filename) + } + for _, d := range f.Decls { + n, ok := d.(*ast.FuncDecl) + if !ok { + continue + } + if n.Recv != nil { + continue + } + name := n.Name.String() + switch { + case name == "TestMain": + if isTestFunc(n, "T") { + t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) + *doImport, *seen = true, true + continue + } + err := checkTestFunc(fset, n, "M") + if err != nil { + return err + } + if t.TestMain != nil { + return errors.New("multiple definitions of TestMain") + } + t.TestMain = &testFunc{pkg, name, "", false} + *doImport, *seen = true, true + case isTest(name, "Test"): + err := checkTestFunc(fset, n, "T") + if err != nil { + return err + } + t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) + *doImport, *seen = true, true + case isTest(name, "Benchmark"): + err := checkTestFunc(fset, n, "B") + if err != nil { + return err + } + t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false}) + *doImport, *seen = true, true + } + } + ex := doc.Examples(f) + sort.Slice(ex, func(i, j int) bool { return ex[i].Order < ex[j].Order }) + for _, e := range ex { + *doImport = true // import test file whether executed or not + if e.Output == "" && !e.EmptyOutput { + // Don't run examples with no output. + continue + } + t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered}) + *seen = true + } + return nil +} + +func checkTestFunc(fset *token.FileSet, fn *ast.FuncDecl, arg string) error { + if !isTestFunc(fn, arg) { + name := fn.Name.String() + pos := fset.Position(fn.Pos()) + return fmt.Errorf("%s: wrong signature for %s, must be: func %s(%s *testing.%s)", pos, name, name, strings.ToLower(arg), arg) + } + return nil +} + +var testmainTmpl = template.Must(template.New("main").Parse(` +package main + +import ( +{{if not .TestMain}} + "os" +{{end}} + "testing" + "testing/internal/testdeps" + +{{if .ImportTest}} + {{if .NeedTest}}_test{{else}}_{{end}} {{.TestPackage.PkgPath | printf "%q"}} +{{end}} +{{if .ImportXtest}} + {{if .NeedXtest}}_xtest{{else}}_{{end}} {{.XTestPackage.PkgPath | printf "%q"}} +{{end}} +) + +var tests = []testing.InternalTest{ +{{range .Tests}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var benchmarks = []testing.InternalBenchmark{ +{{range .Benchmarks}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var examples = []testing.InternalExample{ +{{range .Examples}} + {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}}, +{{end}} +} + +func init() { + testdeps.ImportPath = {{.TestPackage.PkgPath | printf "%q"}} +} + +func main() { + m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples) +{{with .TestMain}} + {{.Package}}.{{.Name}}(m) +{{else}} + os.Exit(m.Run()) +{{end}} +} + +`)) diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index ce322ce5ed..71ffcd9d55 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -1,15 +1,11 @@ package packages import ( - "bytes" - "encoding/json" "go/parser" "go/token" - "path" "path/filepath" "strconv" "strings" - "sync" ) // processGolistOverlay provides rudimentary support for adding @@ -31,123 +27,50 @@ func 
processGolistOverlay(cfg *Config, response *driverResponse) (modifiedPkgs, havePkgs[pkg.PkgPath] = pkg.ID } - var rootDirs map[string]string - var onceGetRootDirs sync.Once - - for opath, contents := range cfg.Overlay { - base := filepath.Base(opath) - if strings.HasSuffix(opath, "_test.go") { +outer: + for path, contents := range cfg.Overlay { + base := filepath.Base(path) + if strings.HasSuffix(path, "_test.go") { // Overlays don't support adding new test files yet. // TODO(matloob): support adding new test files. continue } - dir := filepath.Dir(opath) - var pkg *Package - var fileExists bool - for _, p := range response.Packages { - for _, f := range p.GoFiles { - if !sameFile(filepath.Dir(f), dir) { - continue + dir := filepath.Dir(path) + for _, pkg := range response.Packages { + var dirContains, fileExists bool + for _, f := range pkg.GoFiles { + if sameFile(filepath.Dir(f), dir) { + dirContains = true } - pkg = p if filepath.Base(f) == base { fileExists = true } } - } - // The overlay could have included an entirely new package. - if pkg == nil { - onceGetRootDirs.Do(func() { - rootDirs = determineRootDirs(cfg) - }) - // Try to find the module or gopath dir the file is contained in. - // Then for modules, add the module opath to the beginning. - var pkgPath string - for rdir, rpath := range rootDirs { - // TODO(matloob): This doesn't properly handle symlinks. - r, err := filepath.Rel(rdir, dir) - if err != nil { - continue + if dirContains { + if !fileExists { + pkg.GoFiles = append(pkg.GoFiles, path) // TODO(matloob): should the file just be added to GoFiles? + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, path) + modifiedPkgsSet[pkg.ID] = true } - pkgPath = filepath.ToSlash(r) - if rpath != "" { - pkgPath = path.Join(rpath, pkgPath) - } - // We only create one new package even it can belong in multiple modules or GOPATH entries. - // This is okay because tools (such as the LSP) that use overlays will recompute the overlay - // once the file is saved, and golist will do the right thing. - // TODO(matloob): Implement module tiebreaking? - break - } - if pkgPath == "" { - continue - } - pkgName, ok := extractPackageName(opath, contents) - if !ok { - continue - } - id := pkgPath - // Try to reclaim a package with the same id if it exists in the response. - for _, p := range response.Packages { - if reclaimPackage(p, id, opath, contents) { - pkg = p - break + imports, err := extractImports(path, contents) + if err != nil { + // Let the parser or type checker report errors later. + continue outer } - } - // Otherwise, create a new package - if pkg == nil { - pkg = &Package{PkgPath: pkgPath, ID: id, Name: pkgName, Imports: make(map[string]*Package)} - // TODO(matloob): Is it okay to amend response.Packages this way? - response.Packages = append(response.Packages, pkg) - havePkgs[pkg.PkgPath] = id - } - } - if !fileExists { - pkg.GoFiles = append(pkg.GoFiles, opath) - // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior - // if the file will be ignored due to its build tags. - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) - modifiedPkgsSet[pkg.ID] = true - } - imports, err := extractImports(opath, contents) - if err != nil { - // Let the parser or type checker report errors later. - continue - } - for _, imp := range imports { - _, found := pkg.Imports[imp] - if !found { - // TODO(matloob): Handle cases when the following block isn't correct. - // These include imports of test variants, imports of vendored packages, etc. 
- id, ok := havePkgs[imp] - if !ok { - id = imp + for _, imp := range imports { + _, found := pkg.Imports[imp] + if !found { + needPkgsSet[imp] = true + // TODO(matloob): Handle cases when the following block isn't correct. + // These include imports of test variants, imports of vendored packages, etc. + id, ok := havePkgs[imp] + if !ok { + id = imp + } + pkg.Imports[imp] = &Package{ID: id} + } } - pkg.Imports[imp] = &Package{ID: id} - } - } - continue - } - - // toPkgPath tries to guess the package path given the id. - // This isn't always correct -- it's certainly wrong for - // vendored packages' paths. - toPkgPath := func(id string) string { - // TODO(matloob): Handle vendor paths. - i := strings.IndexByte(id, ' ') - if i >= 0 { - return id[:i] - } - return id - } - - // Do another pass now that new packages have been created to determine the - // set of missing packages. - for _, pkg := range response.Packages { - for _, imp := range pkg.Imports { - pkgPath := toPkgPath(imp.ID) - if _, ok := havePkgs[pkgPath]; !ok { - needPkgsSet[pkgPath] = true + continue outer } } } @@ -163,46 +86,6 @@ func processGolistOverlay(cfg *Config, response *driverResponse) (modifiedPkgs, return modifiedPkgs, needPkgs, err } -// determineRootDirs returns a mapping from directories code can be contained in to the -// corresponding import path prefixes of those directories. -// Its result is used to try to determine the import path for a package containing -// an overlay file. -func determineRootDirs(cfg *Config) map[string]string { - // Assume modules first: - out, err := invokeGo(cfg, "list", "-m", "-json", "all") - if err != nil { - return determineRootDirsGOPATH(cfg) - } - m := map[string]string{} - type jsonMod struct{ Path, Dir string } - for dec := json.NewDecoder(out); dec.More(); { - mod := new(jsonMod) - if err := dec.Decode(mod); err != nil { - return m // Give up and return an empty map. Package won't be found for overlay. - } - if mod.Dir != "" && mod.Path != "" { - // This is a valid module; add it to the map. - m[mod.Dir] = mod.Path - } - } - return m -} - -func determineRootDirsGOPATH(cfg *Config) map[string]string { - m := map[string]string{} - out, err := invokeGo(cfg, "env", "GOPATH") - if err != nil { - // Could not determine root dir mapping. Everything is best-effort, so just return an empty map. - // When we try to find the import path for a directory, there will be no root-dir match and - // we'll give up. - return m - } - for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) { - m[filepath.Join(p, "src")] = "" - } - return m -} - func extractImports(filename string, contents []byte) ([]string, error) { f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? if err != nil { @@ -219,44 +102,3 @@ func extractImports(filename string, contents []byte) ([]string, error) { } return res, nil } - -// reclaimPackage attempts to reuse a package that failed to load in an overlay. -// -// If the package has errors and has no Name, GoFiles, or Imports, -// then it's possible that it doesn't yet exist on disk. -func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. 
- if pkg.ID != id { - return false - } - if len(pkg.Errors) != 1 { - return false - } - if pkg.Name != "" || pkg.ExportFile != "" { - return false - } - if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { - return false - } - if len(pkg.Imports) > 0 { - return false - } - pkgName, ok := extractPackageName(filename, contents) - if !ok { - return false - } - pkg.Name = pkgName - pkg.Errors = nil - return true -} - -func extractPackageName(filename string, contents []byte) (string, bool) { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? - if err != nil { - return "", false - } - return f.Name.Name, true -} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index cd151469a2..f4aac56e97 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -19,75 +19,42 @@ import ( "log" "os" "path/filepath" - "strings" "sync" "golang.org/x/tools/go/gcexportdata" ) -// A LoadMode controls the amount of detail to return when loading. -// The bits below can be combined to specify which fields should be -// filled in the result packages. -// The zero value is a special case, equivalent to combining -// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. -// ID and Errors (if present) will always be filled. -// Load may return more information than requested. +// A LoadMode specifies the amount of detail to return when loading. +// Higher-numbered modes cause Load to return more information, +// but may be slower. Load may return more information than requested. type LoadMode int const ( - // NeedName adds Name and PkgPath. - NeedName LoadMode = 1 << iota - - // NeedFiles adds GoFiles and OtherFiles. - NeedFiles - - // NeedCompiledGoFiles adds CompiledGoFiles. - NeedCompiledGoFiles - - // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain - // "placeholder" Packages with only the ID set. - NeedImports - - // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. If NeedImports - // is not set NeedDeps has no effect. - NeedDeps - - // NeedExportsFile adds ExportsFile. - NeedExportsFile - - // NeedTypes adds Types, Fset, and IllTyped. - NeedTypes - - // NeedSyntax adds Syntax. - NeedSyntax - - // NeedTypesInfo adds TypesInfo. - NeedTypesInfo - - // NeedTypesSizes adds TypesSizes. - NeedTypesSizes -) - -const ( - // Deprecated: LoadFiles exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles - - // Deprecated: LoadImports exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadImports = LoadFiles | NeedImports | NeedDeps - - // Deprecated: LoadTypes exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadTypes = LoadImports | NeedTypes | NeedTypesSizes - - // Deprecated: LoadSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. 
- LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo - - // Deprecated: LoadAllSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadAllSyntax = LoadSyntax + // LoadFiles finds the packages and computes their source file lists. + // Package fields: ID, Name, Errors, GoFiles, and OtherFiles. + LoadFiles LoadMode = iota + + // LoadImports adds import information for each package + // and its dependencies. + // Package fields added: Imports. + LoadImports + + // LoadTypes adds type information for package-level + // declarations in the packages matching the patterns. + // Package fields added: Types, Fset, and IllTyped. + // This mode uses type information provided by the build system when + // possible, and may fill in the ExportFile field. + LoadTypes + + // LoadSyntax adds typed syntax trees for the packages matching the patterns. + // Package fields added: Syntax, and TypesInfo, for direct pattern matches only. + LoadSyntax + + // LoadAllSyntax adds typed syntax trees for the packages matching the patterns + // and all dependencies. + // Package fields added: Types, Fset, IllTyped, Syntax, and TypesInfo, + // for all packages in the import graph. + LoadAllSyntax ) // A Config specifies details about how packages should be loaded. @@ -123,7 +90,7 @@ type Config struct { BuildFlags []string // Fset provides source position information for syntax trees and types. - // If Fset is nil, Load will use a new fileset, but preserve Fset's value. + // If Fset is nil, the loader will create a new FileSet. Fset *token.FileSet // ParseFile is called to read and parse each file @@ -261,9 +228,9 @@ type Package struct { Imports map[string]*Package // Types provides type information for the package. - // The NeedTypes LoadMode bit sets this field for packages matching the - // patterns; type information for dependencies may be missing or incomplete, - // unless NeedDeps and NeedImports are also set. + // Modes LoadTypes and above set this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete. + // Mode LoadAllSyntax sets this field for all packages, including dependencies. Types *types.Package // Fset provides position information for Types, TypesInfo, and Syntax. @@ -276,9 +243,8 @@ type Package struct { // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. // - // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. - // If NeedDeps and NeedImports are also set, this field will also be populated - // for dependencies. + // Mode LoadSyntax sets this field for packages matching the patterns. + // Mode LoadAllSyntax sets this field for all packages, including dependencies. Syntax []*ast.File // TypesInfo provides type information about the package's syntax trees. @@ -405,34 +371,15 @@ type loaderPackage struct { type loader struct { pkgs map[string]*loaderPackage Config - sizes types.Sizes - parseCache map[string]*parseValue - parseCacheMu sync.Mutex - exportMu sync.Mutex // enforces mutual exclusion of exportdata operations - - // TODO(matloob): Add an implied mode here and use that instead of mode. - // Implied mode would contain all the fields we need the data for so we can - // get the actually requested fields. We'll zero them out before returning - // packages to the user. This will make it easier for us to get the conditions - // where we need certain modes right. 
-} - -type parseValue struct { - f *ast.File - err error - ready chan struct{} + sizes types.Sizes + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations } func newLoader(cfg *Config) *loader { - ld := &loader{ - parseCache: map[string]*parseValue{}, - } + ld := &loader{} if cfg != nil { ld.Config = *cfg } - if ld.Config.Mode == 0 { - ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. - } if ld.Config.Env == nil { ld.Config.Env = os.Environ() } @@ -445,7 +392,7 @@ func newLoader(cfg *Config) *loader { } } - if ld.Mode&NeedTypes != 0 { + if ld.Mode >= LoadTypes { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -454,8 +401,12 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + var isrc interface{} + if src != nil { + isrc = src + } const mode = parser.AllErrors | parser.ParseComments - return parser.ParseFile(fset, filename, src, mode) + return parser.ParseFile(fset, filename, isrc, mode) } } } @@ -478,9 +429,11 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { rootIndex = i } lpkg := &loaderPackage{ - Package: pkg, - needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0, - needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0 || + Package: pkg, + needtypes: ld.Mode >= LoadAllSyntax || + ld.Mode >= LoadTypes && rootIndex >= 0, + needsrc: ld.Mode >= LoadAllSyntax || + ld.Mode >= LoadSyntax && rootIndex >= 0 || len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files pkg.ExportFile == "" && pkg.PkgPath != "unsafe", } @@ -553,17 +506,14 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { if lpkg.needsrc { srcPkgs = append(srcPkgs, lpkg) } - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes - } stack = stack[:len(stack)-1] // pop lpkg.color = black return lpkg.needsrc } - if ld.Mode&(NeedImports|NeedDeps) == 0 { - // We do this to drop the stub import packages that we are not even going to try to resolve. + if ld.Mode < LoadImports { + //we do this to drop the stub import packages that we are not even going to try to resolve for _, lpkg := range initial { lpkg.Imports = nil } @@ -573,19 +523,17 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { visit(lpkg) } } - if ld.Mode&NeedDeps != 0 { // TODO(matloob): This is only the case if NeedTypes is also set, right? - for _, lpkg := range srcPkgs { - // Complete type information is required for the - // immediate dependencies of each source package. - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - imp.needtypes = true - } + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true } } // Load type data if needed, starting at // the initial packages (roots of the import DAG). 
- if ld.Mode&NeedTypes != 0 { + if ld.Mode >= LoadTypes { var wg sync.WaitGroup for _, lpkg := range initial { wg.Add(1) @@ -598,61 +546,16 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { } result := make([]*Package, len(initial)) - importPlaceholders := make(map[string]*Package) for i, lpkg := range initial { result[i] = lpkg.Package } - for i := range ld.pkgs { - // Clear all unrequested fields, for extra de-Hyrum-ization. - if ld.Mode&NeedName == 0 { - ld.pkgs[i].Name = "" - ld.pkgs[i].PkgPath = "" - } - if ld.Mode&NeedFiles == 0 { - ld.pkgs[i].GoFiles = nil - ld.pkgs[i].OtherFiles = nil - } - if ld.Mode&NeedCompiledGoFiles == 0 { - ld.pkgs[i].CompiledGoFiles = nil - } - if ld.Mode&NeedImports == 0 { - ld.pkgs[i].Imports = nil - } - if ld.Mode&NeedExportsFile == 0 { - ld.pkgs[i].ExportFile = "" - } - if ld.Mode&NeedTypes == 0 { - ld.pkgs[i].Types = nil - ld.pkgs[i].Fset = nil - ld.pkgs[i].IllTyped = false - } - if ld.Mode&NeedSyntax == 0 { - ld.pkgs[i].Syntax = nil - } - if ld.Mode&NeedTypesInfo == 0 { - ld.pkgs[i].TypesInfo = nil - } - if ld.Mode&NeedTypesSizes == 0 { - ld.pkgs[i].TypesSizes = nil - } - if ld.Mode&NeedDeps == 0 { - for j, pkg := range ld.pkgs[i].Imports { - ph, ok := importPlaceholders[pkg.ID] - if !ok { - ph = &Package{ID: pkg.ID} - importPlaceholders[pkg.ID] = ph - } - ld.pkgs[i].Imports[j] = ph - } - } - } return result, nil } // loadRecursive loads the specified package and its dependencies, // recursively, in parallel, in topological order. // It is atomic and idempotent. -// Precondition: ld.Mode&NeedTypes. +// Precondition: ld.Mode >= LoadTypes. func (ld *loader) loadRecursive(lpkg *loaderPackage) { lpkg.loadOnce.Do(func() { // Load the direct dependencies, in parallel. @@ -674,7 +577,7 @@ func (ld *loader) loadRecursive(lpkg *loaderPackage) { // loadPackage loads the specified package. // It must be called only once per Package, // after immediate dependencies are loaded. -// Precondition: ld.Mode & NeedTypes. +// Precondition: ld.Mode >= LoadTypes. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { // Fill in the blanks to avoid surprises. @@ -804,7 +707,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { // Type-check bodies of functions only in non-initial packages. // Example: for import graph A->B->C and initial packages {A,C}, // we can ignore function bodies in B. - IgnoreFuncBodies: (ld.Mode&(NeedDeps|NeedTypesInfo) == 0) && !lpkg.initial, + IgnoreFuncBodies: ld.Mode < LoadAllSyntax && !lpkg.initial, Error: appendError, Sizes: ld.sizes, @@ -857,42 +760,6 @@ func (f importerFunc) Import(path string) (*types.Package, error) { return f(pat // the number of parallel I/O calls per process. 
var ioLimit = make(chan bool, 20) -func (ld *loader) parseFile(filename string) (*ast.File, error) { - ld.parseCacheMu.Lock() - v, ok := ld.parseCache[filename] - if ok { - // cache hit - ld.parseCacheMu.Unlock() - <-v.ready - } else { - // cache miss - v = &parseValue{ready: make(chan struct{})} - ld.parseCache[filename] = v - ld.parseCacheMu.Unlock() - - var src []byte - for f, contents := range ld.Config.Overlay { - if sameFile(f, filename) { - src = contents - } - } - var err error - if src == nil { - ioLimit <- true // wait - src, err = ioutil.ReadFile(filename) - <-ioLimit // signal - } - if err != nil { - v.err = err - } else { - v.f, v.err = ld.ParseFile(ld.Fset, filename, src) - } - - close(v.ready) - } - return v.f, v.err -} - // parseFiles reads and parses the Go source files and returns the ASTs // of the ones that could be at least partially parsed, along with a // list of I/O and parse errors encountered. @@ -913,7 +780,24 @@ func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { } wg.Add(1) go func(i int, filename string) { - parsed[i], errors[i] = ld.parseFile(filename) + ioLimit <- true // wait + // ParseFile may return both an AST and an error. + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + src, err = ioutil.ReadFile(filename) + } + if err != nil { + parsed[i], errors[i] = nil, err + } else { + parsed[i], errors[i] = ld.ParseFile(ld.Fset, filename, src) + } + <-ioLimit // signal wg.Done() }(i, file) } @@ -954,7 +838,7 @@ func sameFile(x, y string) bool { // overlay case implies x==y.) return true } - if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if filepath.Base(x) == filepath.Base(y) { // (optimisation) if xi, err := os.Stat(x); err == nil { if yi, err := os.Stat(y); err == nil { return os.SameFile(xi, yi) @@ -1067,5 +951,5 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error } func usesExportData(cfg *Config) bool { - return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedTypesInfo == 0 + return LoadTypes <= cfg.Mode && cfg.Mode < LoadAllSyntax } diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/imports/fix.go similarity index 89% rename from vendor/golang.org/x/tools/internal/imports/fix.go rename to vendor/golang.org/x/tools/imports/fix.go index d9ba3b786d..f18c413514 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/imports/fix.go @@ -23,35 +23,45 @@ import ( "strings" "sync" "time" - "unicode" - "unicode/utf8" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/packages" "golang.org/x/tools/internal/gopathwalk" ) +// Debug controls verbose logging. +var Debug = false + +// LocalPrefix is a comma-separated string of import path prefixes, which, if +// set, instructs Process to sort the import paths with the given prefixes +// into another group after 3rd-party packages. +var LocalPrefix string + +func localPrefixes() []string { + if LocalPrefix != "" { + return strings.Split(LocalPrefix, ",") + } + return nil +} + // importToGroup is a list of functions which map from an import path to // a group number. 
-var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool){ - func(env *ProcessEnv, importPath string) (num int, ok bool) { - if env.LocalPrefix == "" { - return - } - for _, p := range strings.Split(env.LocalPrefix, ",") { +var importToGroup = []func(importPath string) (num int, ok bool){ + func(importPath string) (num int, ok bool) { + for _, p := range localPrefixes() { if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { return 3, true } } return }, - func(_ *ProcessEnv, importPath string) (num int, ok bool) { + func(importPath string) (num int, ok bool) { if strings.HasPrefix(importPath, "appengine") { return 2, true } return }, - func(_ *ProcessEnv, importPath string) (num int, ok bool) { + func(importPath string) (num int, ok bool) { if strings.Contains(importPath, ".") { return 1, true } @@ -59,9 +69,9 @@ var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool }, } -func importGroup(env *ProcessEnv, importPath string) int { +func importGroup(importPath string) int { for _, fn := range importToGroup { - if n, ok := fn(env, importPath); ok { + if n, ok := fn(importPath); ok { return n } } @@ -229,7 +239,7 @@ type pass struct { fset *token.FileSet // fset used to parse f and its siblings. f *ast.File // the file being fixed. srcDir string // the directory containing f. - env *ProcessEnv // the environment to use for go commands, etc. + fixEnv *fixEnv // the environment to use for go commands, etc. loadRealPackageNames bool // if true, load package names from disk rather than guessing them. otherFiles []*ast.File // sibling files. @@ -254,7 +264,7 @@ func (p *pass) loadPackageNames(imports []*importInfo) error { unknown = append(unknown, imp.importPath) } - names, err := p.env.getResolver().loadPackageNames(unknown, p.srcDir) + names, err := p.fixEnv.getResolver().loadPackageNames(unknown, p.srcDir) if err != nil { return err } @@ -279,7 +289,7 @@ func (p *pass) importIdentifier(imp *importInfo) string { if known != nil && known.name != "" { return known.name } - return importPathToAssumedName(imp.importPath) + return importPathToNameBasic(imp.importPath, p.srcDir) } // load reads in everything necessary to run a pass, and reports whether the @@ -312,7 +322,7 @@ func (p *pass) load() bool { if p.loadRealPackageNames { err := p.loadPackageNames(append(imports, p.candidates...)) if err != nil { - if p.env.Debug { + if Debug { log.Printf("loading package names: %v", err) } return false @@ -379,7 +389,7 @@ func (p *pass) fix() bool { } path := strings.Trim(imp.Path.Value, `""`) ident := p.importIdentifier(&importInfo{importPath: path}) - if ident != importPathToAssumedName(path) { + if ident != importPathToNameBasic(path, p.srcDir) { imp.Name = &ast.Ident{Name: ident, NamePos: imp.Pos()} } } @@ -436,13 +446,13 @@ func (p *pass) addCandidate(imp *importInfo, pkg *packageInfo) { // easily be extended by adding a file with an init function. 
var fixImports = fixImportsDefault -func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { +func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *fixEnv) error { abs, err := filepath.Abs(filename) if err != nil { return err } srcDir := filepath.Dir(abs) - if env.Debug { + if Debug { log.Printf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) } @@ -474,7 +484,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P // Third pass: get real package names where we had previously used // the naive algorithm. This is the first step that will use the // environment, so we provide it here for the first time. - p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} + p = &pass{fset: fset, f: f, srcDir: srcDir, fixEnv: env} p.loadRealPackageNames = true p.otherFiles = otherFiles if p.load() { @@ -498,16 +508,13 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P return nil } -// ProcessEnv contains environment variables and settings that affect the use of +// fixEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. -type ProcessEnv struct { - LocalPrefix string - Debug bool - +type fixEnv struct { // If non-empty, these will be used instead of the // process-wide values. - GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string - WorkingDir string + GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS string + WorkingDir string // If true, use go/packages regardless of the environment. ForceGoPackages bool @@ -515,7 +522,7 @@ type ProcessEnv struct { resolver resolver } -func (e *ProcessEnv) env() []string { +func (e *fixEnv) env() []string { env := os.Environ() add := func(k, v string) { if v != "" { @@ -527,32 +534,28 @@ func (e *ProcessEnv) env() []string { add("GO111MODULE", e.GO111MODULE) add("GOPROXY", e.GOPROXY) add("GOFLAGS", e.GOFLAGS) - add("GOSUMDB", e.GOSUMDB) if e.WorkingDir != "" { add("PWD", e.WorkingDir) } return env } -func (e *ProcessEnv) getResolver() resolver { +func (e *fixEnv) getResolver() resolver { if e.resolver != nil { return e.resolver } if e.ForceGoPackages { - e.resolver = &goPackagesResolver{env: e} - return e.resolver + return &goPackagesResolver{env: e} } out, err := e.invokeGo("env", "GOMOD") if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 { - e.resolver = &gopathResolver{env: e} - return e.resolver + return &gopathResolver{env: e} } - e.resolver = &moduleResolver{env: e} - return e.resolver + return &moduleResolver{env: e} } -func (e *ProcessEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config { +func (e *fixEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config { return &packages.Config{ Mode: mode, Dir: e.WorkingDir, @@ -560,14 +563,14 @@ func (e *ProcessEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config } } -func (e *ProcessEnv) buildContext() *build.Context { +func (e *fixEnv) buildContext() *build.Context { ctx := build.Default ctx.GOROOT = e.GOROOT ctx.GOPATH = e.GOPATH return &ctx } -func (e *ProcessEnv) invokeGo(args ...string) (*bytes.Buffer, error) { +func (e *fixEnv) invokeGo(args ...string) (*bytes.Buffer, error) { cmd := exec.Command("go", args...) 
stdout := &bytes.Buffer{} stderr := &bytes.Buffer{} @@ -576,7 +579,7 @@ func (e *ProcessEnv) invokeGo(args ...string) (*bytes.Buffer, error) { cmd.Env = e.env() cmd.Dir = e.WorkingDir - if e.Debug { + if Debug { defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) } if err := cmd.Run(); err != nil { @@ -627,7 +630,7 @@ type resolver interface { // gopathResolver implements resolver for GOPATH and module workspaces using go/packages. type goPackagesResolver struct { - env *ProcessEnv + env *fixEnv } func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { @@ -645,7 +648,7 @@ func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir strin if _, ok := names[path]; ok { continue } - names[path] = importPathToAssumedName(path) + names[path] = importPathToNameBasic(path, srcDir) } return names, nil @@ -654,7 +657,7 @@ func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir strin func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) { var loadQueries []string for pkgName := range refs { - loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName) + loadQueries = append(loadQueries, "name="+pkgName) } sort.Strings(loadQueries) cfg := r.env.newPackagesConfig(packages.LoadFiles) @@ -675,7 +678,7 @@ func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) { } func addExternalCandidates(pass *pass, refs references, filename string) error { - dirScan, err := pass.env.getResolver().scan(refs) + dirScan, err := pass.fixEnv.getResolver().scan(refs) if err != nil { return err } @@ -702,7 +705,7 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { go func(pkgName string, symbols map[string]bool) { defer wg.Done() - found, err := findImport(ctx, pass, dirScan, pkgName, symbols, filename) + found, err := findImport(ctx, pass.fixEnv, dirScan, pkgName, symbols, filename) if err != nil { firstErrOnce.Do(func() { @@ -738,42 +741,24 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { return firstErr } -// notIdentifier reports whether ch is an invalid identifier character. -func notIdentifier(ch rune) bool { - return !('a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || - '0' <= ch && ch <= '9' || - ch == '_' || - ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) -} - -// importPathToAssumedName returns the assumed package name of an import path. -// It does this using only string parsing of the import path. -// It picks the last element of the path that does not look like a major -// version, and then picks the valid identifier off the start of that element. -// It is used to determine if a local rename should be added to an import for -// clarity. -// This function could be moved to a standard package and exported if we want -// for use in other tools. -func importPathToAssumedName(importPath string) string { +// importPathToNameBasic assumes the package name is the base of import path, +// except that if the path ends in foo/vN, it assumes the package name is foo. +func importPathToNameBasic(importPath, srcDir string) (packageName string) { base := path.Base(importPath) if strings.HasPrefix(base, "v") { if _, err := strconv.Atoi(base[1:]); err == nil { dir := path.Dir(importPath) if dir != "." 
{ - base = path.Base(dir) + return path.Base(dir) } } } - base = strings.TrimPrefix(base, "go-") - if i := strings.IndexFunc(base, notIdentifier); i >= 0 { - base = base[:i] - } return base } // gopathResolver implements resolver for GOPATH workspaces. type gopathResolver struct { - env *ProcessEnv + env *fixEnv } func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { @@ -786,7 +771,7 @@ func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) ( // importPathToNameGoPath finds out the actual package name, as declared in its .go files. // If there's a problem, it returns "". -func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName string) { +func importPathToName(env *fixEnv, importPath, srcDir string) (packageName string) { // Fast path for standard library without going to disk. if _, ok := stdlib[importPath]; ok { return path.Base(importPath) // stdlib packages always match their paths. @@ -922,7 +907,7 @@ func (r *gopathResolver) scan(_ references) ([]*pkg, error) { dir: dir, }) } - gopathwalk.Walk(gopathwalk.SrcDirsRoots(r.env.buildContext()), add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) + gopathwalk.Walk(gopathwalk.SrcDirsRoots(r.env.buildContext()), add, gopathwalk.Options{Debug: Debug, ModulesEnabled: false}) return result, nil } @@ -941,8 +926,8 @@ func VendorlessPath(ipath string) string { // loadExports returns the set of exported symbols in the package at dir. // It returns nil on error or if the package name in dir does not match expectPackage. -func loadExports(ctx context.Context, env *ProcessEnv, expectPackage string, pkg *pkg) (map[string]bool, error) { - if env.Debug { +func loadExports(ctx context.Context, env *fixEnv, expectPackage string, pkg *pkg) (map[string]bool, error) { + if Debug { log.Printf("loading exports in dir %s (seeking package %s)", pkg.dir, expectPackage) } if pkg.goPackage != nil { @@ -1015,7 +1000,7 @@ func loadExports(ctx context.Context, env *ProcessEnv, expectPackage string, pkg } } - if env.Debug { + if Debug { exportList := make([]string, 0, len(exports)) for k := range exports { exportList = append(exportList, k) @@ -1028,7 +1013,7 @@ func loadExports(ctx context.Context, env *ProcessEnv, expectPackage string, pkg // findImport searches for a package with the given symbols. // If no package is found, findImport returns ("", false, nil) -func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { +func findImport(ctx context.Context, env *fixEnv, dirScan []*pkg, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { pkgDir, err := filepath.Abs(filename) if err != nil { return nil, err @@ -1038,11 +1023,6 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, // Find candidate packages, looking only at their directory names first. var candidates []pkgDistance for _, pkg := range dirScan { - if pkg.dir == pkgDir && pass.f.Name.Name == pkgName { - // The candidate is in the same directory and has the - // same package name. Don't try to import ourselves. - continue - } if pkgIsCandidate(filename, pkgName, pkg) { candidates = append(candidates, pkgDistance{ pkg: pkg, @@ -1056,7 +1036,7 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, // ones. Note that this sorts by the de-vendored name, so // there's no "penalty" for vendoring. 
sort.Sort(byDistanceOrImportPathShortLength(candidates)) - if pass.env.Debug { + if Debug { for i, c := range candidates { log.Printf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) } @@ -1095,9 +1075,9 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, wg.Done() }() - exports, err := loadExports(ctx, pass.env, pkgName, c.pkg) + exports, err := loadExports(ctx, env, pkgName, c.pkg) if err != nil { - if pass.env.Debug { + if Debug { log.Printf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) } resc <- nil diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go deleted file mode 100644 index eef25969de..0000000000 --- a/vendor/golang.org/x/tools/imports/forward.go +++ /dev/null @@ -1,62 +0,0 @@ -// Package imports implements a Go pretty-printer (like package "go/format") -// that also adds or removes import statements as necessary. -package imports // import "golang.org/x/tools/imports" - -import ( - "go/build" - - intimp "golang.org/x/tools/internal/imports" -) - -// Options specifies options for processing files. -type Options struct { - Fragment bool // Accept fragment of a source file (no package statement) - AllErrors bool // Report all errors (not just the first 10 on different lines) - - Comments bool // Print comments (true if nil *Options provided) - TabIndent bool // Use tabs for indent (true if nil *Options provided) - TabWidth int // Tab width (8 if nil *Options provided) - - FormatOnly bool // Disable the insertion and deletion of imports -} - -// Debug controls verbose logging. -var Debug = false - -// LocalPrefix is a comma-separated string of import path prefixes, which, if -// set, instructs Process to sort the import paths with the given prefixes -// into another group after 3rd-party packages. -var LocalPrefix string - -// Process formats and adjusts imports for the provided file. -// If opt is nil the defaults are used. -// -// Note that filename's directory influences which imports can be chosen, -// so it is important that filename be accurate. -// To process data ``as if'' it were in filename, pass the data as a non-nil src. -func Process(filename string, src []byte, opt *Options) ([]byte, error) { - if opt == nil { - opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} - } - intopt := &intimp.Options{ - Env: &intimp.ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, - Debug: Debug, - LocalPrefix: LocalPrefix, - }, - AllErrors: opt.AllErrors, - Comments: opt.Comments, - FormatOnly: opt.FormatOnly, - Fragment: opt.Fragment, - TabIndent: opt.TabIndent, - TabWidth: opt.TabWidth, - } - return intimp.Process(filename, src, intopt) -} - -// VendorlessPath returns the devendorized version of the import path ipath. -// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". 
-func VendorlessPath(ipath string) string { - return intimp.VendorlessPath(ipath) -} diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/imports/imports.go similarity index 90% rename from vendor/golang.org/x/tools/internal/imports/imports.go rename to vendor/golang.org/x/tools/imports/imports.go index 6253337604..07101cb804 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/imports/imports.go @@ -6,13 +6,14 @@ // Package imports implements a Go pretty-printer (like package "go/format") // that also adds or removes import statements as necessary. -package imports +package imports // import "golang.org/x/tools/imports" import ( "bufio" "bytes" "fmt" "go/ast" + "go/build" "go/format" "go/parser" "go/printer" @@ -26,10 +27,8 @@ import ( "golang.org/x/tools/go/ast/astutil" ) -// Options is golang.org/x/tools/imports.Options with extra internal-only options. +// Options specifies options for processing files. type Options struct { - Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state. - Fragment bool // Accept fragment of a source file (no package statement) AllErrors bool // Report all errors (not just the first 10 on different lines) @@ -40,8 +39,21 @@ type Options struct { FormatOnly bool // Disable the insertion and deletion of imports } -// Process implements golang.org/x/tools/imports.Process with explicit context in env. +// Process formats and adjusts imports for the provided file. +// If opt is nil the defaults are used. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +// To process data ``as if'' it were in filename, pass the data as a non-nil src. func Process(filename string, src []byte, opt *Options) ([]byte, error) { + env := &fixEnv{GOPATH: build.Default.GOPATH, GOROOT: build.Default.GOROOT} + return process(filename, src, opt, env) +} + +func process(filename string, src []byte, opt *Options, env *fixEnv) ([]byte, error) { + if opt == nil { + opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} + } if src == nil { b, err := ioutil.ReadFile(filename) if err != nil { @@ -57,12 +69,12 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) { } if !opt.FormatOnly { - if err := fixImports(fileSet, file, filename, opt.Env); err != nil { + if err := fixImports(fileSet, file, filename, env); err != nil { return nil, err } } - sortImports(opt.Env, fileSet, file) + sortImports(fileSet, file) imps := astutil.Imports(fileSet, file) var spacesBefore []string // import paths we need spaces before for _, impSection := range imps { @@ -73,7 +85,7 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) { lastGroup := -1 for _, importSpec := range impSection { importPath, _ := strconv.Unquote(importSpec.Path.Value) - groupNum := importGroup(opt.Env, importPath) + groupNum := importGroup(importPath) if groupNum != lastGroup && lastGroup != -1 { spacesBefore = append(spacesBefore, importPath) } diff --git a/vendor/golang.org/x/tools/internal/imports/mkindex.go b/vendor/golang.org/x/tools/imports/mkindex.go similarity index 99% rename from vendor/golang.org/x/tools/internal/imports/mkindex.go rename to vendor/golang.org/x/tools/imports/mkindex.go index ef8c0d2871..755e2394f2 100644 --- a/vendor/golang.org/x/tools/internal/imports/mkindex.go +++ b/vendor/golang.org/x/tools/imports/mkindex.go @@ -8,7 +8,7 @@ // standard library. 
The file is intended to be built as part of the imports // package, so that the package may be used in environments where a GOROOT is // not available (such as App Engine). -package imports +package main import ( "bytes" diff --git a/vendor/golang.org/x/tools/internal/imports/mkstdlib.go b/vendor/golang.org/x/tools/imports/mkstdlib.go similarity index 78% rename from vendor/golang.org/x/tools/internal/imports/mkstdlib.go rename to vendor/golang.org/x/tools/imports/mkstdlib.go index f67b5c1edb..5e53378fc8 100644 --- a/vendor/golang.org/x/tools/internal/imports/mkstdlib.go +++ b/vendor/golang.org/x/tools/imports/mkstdlib.go @@ -3,7 +3,7 @@ // mkstdlib generates the zstdlib.go file, containing the Go standard // library API symbols. It's baked into the binary to avoid scanning // GOPATH in the common case. -package imports +package main import ( "bufio" @@ -14,7 +14,6 @@ import ( "io/ioutil" "log" "os" - "os/exec" "path/filepath" "regexp" "runtime" @@ -59,11 +58,6 @@ func main() { mustOpen(api("go1.9.txt")), mustOpen(api("go1.10.txt")), mustOpen(api("go1.11.txt")), - mustOpen(api("go1.12.txt")), - - // The API of the syscall/js package needs to be computed explicitly, - // because it's not included in the GOROOT/api/go1.*.txt files at this time. - syscallJSAPI(), ) sc := bufio.NewScanner(f) @@ -115,18 +109,3 @@ func main() { log.Fatal(err) } } - -// syscallJSAPI returns the API of the syscall/js package. -// It's computed from the contents of $(go env GOROOT)/src/syscall/js. -func syscallJSAPI() io.Reader { - var exeSuffix string - if runtime.GOOS == "windows" { - exeSuffix = ".exe" - } - cmd := exec.Command("go"+exeSuffix, "run", "cmd/api", "-contexts", "js-wasm", "syscall/js") - out, err := cmd.Output() - if err != nil { - log.Fatalln(err) - } - return bytes.NewReader(out) -} diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/imports/mod.go similarity index 97% rename from vendor/golang.org/x/tools/internal/imports/mod.go rename to vendor/golang.org/x/tools/imports/mod.go index a072214eee..ec769145cc 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/imports/mod.go @@ -22,9 +22,8 @@ import ( // moduleResolver implements resolver for modules using the go command as little // as feasible. type moduleResolver struct { - env *ProcessEnv + env *fixEnv - initialized bool main *moduleJSON modsByModPath []*moduleJSON // All modules, ordered by # of path components in module Path... modsByDir []*moduleJSON // ...or Dir. @@ -49,7 +48,7 @@ type moduleErrorJSON struct { } func (r *moduleResolver) init() error { - if r.initialized { + if r.main != nil { return nil } stdout, err := r.env.invokeGo("list", "-m", "-json", "...") @@ -62,7 +61,7 @@ func (r *moduleResolver) init() error { return err } if mod.Dir == "" { - if r.env.Debug { + if Debug { log.Printf("module %v has not been downloaded and will be ignored", mod.Path) } // Can't do anything with a module that's not downloaded. @@ -88,7 +87,6 @@ func (r *moduleResolver) init() error { return count(j) < count(i) // descending order }) - r.initialized = true return nil } @@ -204,9 +202,7 @@ func (r *moduleResolver) scan(_ references) ([]*pkg, error) { // Walk GOROOT, GOPATH/pkg/mod, and the main module. 
roots := []gopathwalk.Root{ {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, - } - if r.main != nil { - roots = append(roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) + {r.main.Dir, gopathwalk.RootCurrentModule}, } for _, p := range filepath.SplitList(r.env.GOPATH) { roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache}) @@ -253,7 +249,7 @@ func (r *moduleResolver) scan(_ references) ([]*pkg, error) { matches := modCacheRegexp.FindStringSubmatch(subdir) modPath, err := module.DecodePath(filepath.ToSlash(matches[1])) if err != nil { - if r.env.Debug { + if Debug { log.Printf("decoding module cache path %q: %v", subdir, err) } return @@ -303,7 +299,7 @@ func (r *moduleResolver) scan(_ references) ([]*pkg, error) { importPathShort: VendorlessPath(importPath), dir: dir, }) - }, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) + }, gopathwalk.Options{Debug: Debug, ModulesEnabled: true}) return result, nil } diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/imports/sortimports.go similarity index 85% rename from vendor/golang.org/x/tools/internal/imports/sortimports.go rename to vendor/golang.org/x/tools/imports/sortimports.go index 0a156fe2ef..f3dd56c7a6 100644 --- a/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/vendor/golang.org/x/tools/imports/sortimports.go @@ -15,7 +15,7 @@ import ( // sortImports sorts runs of consecutive import lines in import blocks in f. // It also removes duplicate imports when it is possible to do so without data loss. -func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { +func sortImports(fset *token.FileSet, f *ast.File) { for i, d := range f.Decls { d, ok := d.(*ast.GenDecl) if !ok || d.Tok != token.IMPORT { @@ -40,11 +40,11 @@ func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { for j, s := range d.Specs { if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line { // j begins a new run. End this one. - specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:j])...) + specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...) i = j } } - specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:])...) + specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...) d.Specs = specs // Deduping can leave a blank line before the rparen; clean that up. @@ -95,7 +95,7 @@ type posSpan struct { End token.Pos } -func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { +func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { // Can't short-circuit here even if specs are already sorted, // since they might yet need deduplication. // A lone import, however, may be safely ignored. @@ -144,7 +144,7 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp // Reassign the import paths to have the same position sequence. // Reassign each comment to abut the end of its spec. // Sort the comments by new position. - sort.Sort(byImportSpec{env, specs}) + sort.Sort(byImportSpec(specs)) // Dedup. Thanks to our sorting, we can just consider // adjacent pairs of imports. 
@@ -197,19 +197,16 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp return specs } -type byImportSpec struct { - env *ProcessEnv - specs []ast.Spec // slice of *ast.ImportSpec -} +type byImportSpec []ast.Spec // slice of *ast.ImportSpec -func (x byImportSpec) Len() int { return len(x.specs) } -func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] } +func (x byImportSpec) Len() int { return len(x) } +func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byImportSpec) Less(i, j int) bool { - ipath := importPath(x.specs[i]) - jpath := importPath(x.specs[j]) + ipath := importPath(x[i]) + jpath := importPath(x[j]) - igroup := importGroup(x.env, ipath) - jgroup := importGroup(x.env, jpath) + igroup := importGroup(ipath) + jgroup := importGroup(jpath) if igroup != jgroup { return igroup < jgroup } @@ -217,13 +214,13 @@ func (x byImportSpec) Less(i, j int) bool { if ipath != jpath { return ipath < jpath } - iname := importName(x.specs[i]) - jname := importName(x.specs[j]) + iname := importName(x[i]) + jname := importName(x[j]) if iname != jname { return iname < jname } - return importComment(x.specs[i]) < importComment(x.specs[j]) + return importComment(x[i]) < importComment(x[j]) } type byCommentPos []*ast.CommentGroup diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/imports/zstdlib.go similarity index 99% rename from vendor/golang.org/x/tools/internal/imports/zstdlib.go rename to vendor/golang.org/x/tools/imports/zstdlib.go index d81b8c5307..ca4f0b2f21 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/imports/zstdlib.go @@ -112,7 +112,6 @@ var stdlib = map[string]map[string]bool{ "Reader": true, "Repeat": true, "Replace": true, - "ReplaceAll": true, "Runes": true, "Split": true, "SplitAfter": true, @@ -442,9 +441,6 @@ var stdlib = map[string]map[string]bool{ "RequireAnyClientCert": true, "Server": true, "SignatureScheme": true, - "TLS_AES_128_GCM_SHA256": true, - "TLS_AES_256_GCM_SHA384": true, - "TLS_CHACHA20_POLY1305_SHA256": true, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": true, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": true, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": true, @@ -473,7 +469,6 @@ var stdlib = map[string]map[string]bool{ "VersionTLS10": true, "VersionTLS11": true, "VersionTLS12": true, - "VersionTLS13": true, "X25519": true, "X509KeyPair": true, }, @@ -1840,7 +1835,6 @@ var stdlib = map[string]map[string]bool{ "R_PPC_UADDR32": true, "R_RISCV": true, "R_RISCV_32": true, - "R_RISCV_32_PCREL": true, "R_RISCV_64": true, "R_RISCV_ADD16": true, "R_RISCV_ADD32": true, @@ -2266,7 +2260,6 @@ var stdlib = map[string]map[string]bool{ "IMAGE_FILE_MACHINE_AMD64": true, "IMAGE_FILE_MACHINE_ARM": true, "IMAGE_FILE_MACHINE_ARM64": true, - "IMAGE_FILE_MACHINE_ARMNT": true, "IMAGE_FILE_MACHINE_EBC": true, "IMAGE_FILE_MACHINE_I386": true, "IMAGE_FILE_MACHINE_IA64": true, @@ -2760,7 +2753,6 @@ var stdlib = map[string]map[string]bool{ "New": true, "Note": true, "Package": true, - "PreserveAST": true, "Synopsis": true, "ToHTML": true, "ToText": true, @@ -2772,10 +2764,9 @@ var stdlib = map[string]map[string]bool{ "Source": true, }, "go/importer": map[string]bool{ - "Default": true, - "For": true, - "ForCompiler": true, - "Lookup": true, + "Default": true, + "For": true, + "Lookup": true, }, "go/parser": map[string]bool{ "AllErrors": true, @@ -3305,7 +3296,6 @@ var stdlib = map[string]map[string]bool{ "SeekEnd": true, 
"SeekStart": true, "Seeker": true, - "StringWriter": true, "TeeReader": true, "WriteCloser": true, "WriteSeeker": true, @@ -3508,12 +3498,6 @@ var stdlib = map[string]map[string]bool{ "Word": true, }, "math/bits": map[string]bool{ - "Add": true, - "Add32": true, - "Add64": true, - "Div": true, - "Div32": true, - "Div64": true, "LeadingZeros": true, "LeadingZeros16": true, "LeadingZeros32": true, @@ -3524,9 +3508,6 @@ var stdlib = map[string]map[string]bool{ "Len32": true, "Len64": true, "Len8": true, - "Mul": true, - "Mul32": true, - "Mul64": true, "OnesCount": true, "OnesCount16": true, "OnesCount32": true, @@ -3546,9 +3527,6 @@ var stdlib = map[string]map[string]bool{ "RotateLeft32": true, "RotateLeft64": true, "RotateLeft8": true, - "Sub": true, - "Sub32": true, - "Sub64": true, "TrailingZeros": true, "TrailingZeros16": true, "TrailingZeros32": true, @@ -3892,7 +3870,6 @@ var stdlib = map[string]map[string]bool{ "StatusTeapot": true, "StatusTemporaryRedirect": true, "StatusText": true, - "StatusTooEarly": true, "StatusTooManyRequests": true, "StatusUnauthorized": true, "StatusUnavailableForLegalReasons": true, @@ -4163,7 +4140,6 @@ var stdlib = map[string]map[string]bool{ "Truncate": true, "Unsetenv": true, "UserCacheDir": true, - "UserHomeDir": true, }, "os/exec": map[string]bool{ "Cmd": true, @@ -4268,7 +4244,6 @@ var stdlib = map[string]map[string]bool{ "MakeMapWithSize": true, "MakeSlice": true, "Map": true, - "MapIter": true, "MapOf": true, "Method": true, "New": true, @@ -4444,12 +4419,9 @@ var stdlib = map[string]map[string]bool{ "Version": true, }, "runtime/debug": map[string]bool{ - "BuildInfo": true, "FreeOSMemory": true, "GCStats": true, - "Module": true, "PrintStack": true, - "ReadBuildInfo": true, "ReadGCStats": true, "SetGCPercent": true, "SetMaxStack": true, @@ -4575,7 +4547,6 @@ var stdlib = map[string]map[string]bool{ "Reader": true, "Repeat": true, "Replace": true, - "ReplaceAll": true, "Replacer": true, "Split": true, "SplitAfter": true, @@ -6134,7 +6105,6 @@ var stdlib = map[string]map[string]bool{ "FreeLibrary": true, "Fsid": true, "Fstat": true, - "Fstatat": true, "Fstatfs": true, "Fstore_t": true, "Fsync": true, @@ -9392,7 +9362,6 @@ var stdlib = map[string]map[string]bool{ "Syscall": true, "Syscall12": true, "Syscall15": true, - "Syscall18": true, "Syscall6": true, "Syscall9": true, "Sysctl": true, @@ -9661,7 +9630,6 @@ var stdlib = map[string]map[string]bool{ "TransmitFile": true, "TransmitFileBuffers": true, "Truncate": true, - "UNIX_PATH_MAX": true, "USAGE_MATCH_TYPE_AND": true, "USAGE_MATCH_TYPE_OR": true, "UTF16FromString": true, @@ -9783,29 +9751,6 @@ var stdlib = map[string]map[string]bool{ "XP1_UNI_RECV": true, "XP1_UNI_SEND": true, }, - "syscall/js": map[string]bool{ - "Error": true, - "Func": true, - "FuncOf": true, - "Global": true, - "Null": true, - "Type": true, - "TypeBoolean": true, - "TypeFunction": true, - "TypeNull": true, - "TypeNumber": true, - "TypeObject": true, - "TypeString": true, - "TypeSymbol": true, - "TypeUndefined": true, - "TypedArray": true, - "TypedArrayOf": true, - "Undefined": true, - "Value": true, - "ValueError": true, - "ValueOf": true, - "Wrapper": true, - }, "testing": map[string]bool{ "AllocsPerRun": true, "B": true, diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md index 9fdbacd3c6..d86768a2c6 100644 --- a/vendor/google.golang.org/appengine/README.md +++ b/vendor/google.golang.org/appengine/README.md @@ -71,30 +71,3 @@ A few APIs were cleaned up, and there are some 
differences: [blobstore package](https://google.golang.org/appengine/blobstore). * `appengine/socket` is not required on App Engine flexible environment / Managed VMs. Use the standard `net` package instead. - -## Key Encode/Decode compatibiltiy to help with datastore library migrations - -Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore. -The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type. - -### Enabling key conversion - -Enable key conversion by calling `EnableKeyConversion(ctx)` in the `/_ah/start` handler for basic and manual scaling or any handler in automatic scaling. - -#### 1. Basic or manual scaling - -This start handler will enable key conversion for all handlers in the service. - -``` -http.HandleFunc("/_ah/start", func(w http.ResponseWriter, r *http.Request) { - datastore.EnableKeyConversion(appengine.NewContext(r)) -}) -``` - -#### 2. Automatic scaling - -`/_ah/start` is not supported for automatic scaling and `/_ah/warmup` is not guaranteed to run, so you must call `datastore.EnableKeyConversion(appengine.NewContext(r))` -before you use code that needs key conversion. - -You may want to add this to each of your handlers, or introduce middleware where it's called. -`EnableKeyConversion` is safe for concurrent use. Any call to it after the first is ignored. \ No newline at end of file diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go index 8c9697674f..0cca033d37 100644 --- a/vendor/google.golang.org/appengine/appengine.go +++ b/vendor/google.golang.org/appengine/appengine.go @@ -97,6 +97,8 @@ func WithContext(parent context.Context, req *http.Request) context.Context { return internal.WithContext(parent, req) } +// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. + // BlobKey is a key for a blobstore blob. 
// // Conceptually, this type belongs in the blobstore package, but it lives in diff --git a/vendor/google.golang.org/appengine/go.mod b/vendor/google.golang.org/appengine/go.mod index 4515927985..f449359d2f 100644 --- a/vendor/google.golang.org/appengine/go.mod +++ b/vendor/google.golang.org/appengine/go.mod @@ -1,10 +1,7 @@ module google.golang.org/appengine require ( - github.com/golang/protobuf v1.3.1 - golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 // indirect - golang.org/x/net v0.0.0-20190603091049-60506f45cf65 - golang.org/x/sys v0.0.0-20190606165138-5da285871e9c // indirect - golang.org/x/text v0.3.2 - golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b // indirect + github.com/golang/protobuf v1.2.0 + golang.org/x/net v0.0.0-20180724234803-3673e40ba225 + golang.org/x/text v0.3.0 ) diff --git a/vendor/google.golang.org/appengine/go.sum b/vendor/google.golang.org/appengine/go.sum index cb3232556b..1a221c0896 100644 --- a/vendor/google.golang.org/appengine/go.sum +++ b/vendor/google.golang.org/appengine/go.sum @@ -1,22 +1,6 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index a6ec19e14c..bbc1cb9c34 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -44,7 +44,6 @@ var ( 
curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id") // Outgoing headers. apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") @@ -495,9 +494,6 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) if ticket == "" { ticket = DefaultTicket() } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go deleted file mode 100644 index 5b6c587a96..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ /dev/null @@ -1,146 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/httpbody.proto - -package httpbody - -import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Message that represents an arbitrary HTTP body. It should only be used for -// payload formats that can't be represented as JSON, such as raw binary or -// an HTML page. -// -// -// This message can be used both in streaming and non-streaming API methods in -// the request as well as the response. -// -// It can be used as a top-level request field, which is convenient if one -// wants to extract parameters from either the URL or HTTP template into the -// request fields and also want access to the raw HTTP body. -// -// Example: -// -// message GetResourceRequest { -// // A unique request id. -// string request_id = 1; -// -// // The raw HTTP body is bound to this field. -// google.api.HttpBody http_body = 2; -// } -// -// service ResourceService { -// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); -// rpc UpdateResource(google.api.HttpBody) returns -// (google.protobuf.Empty); -// } -// -// Example with streaming methods: -// -// service CaldavService { -// rpc GetCalendar(stream google.api.HttpBody) -// returns (stream google.api.HttpBody); -// rpc UpdateCalendar(stream google.api.HttpBody) -// returns (stream google.api.HttpBody); -// } -// -// Use of this type only changes how the request and response bodies are -// handled, all other features will continue to work unchanged. -type HttpBody struct { - // The HTTP Content-Type header value specifying the content type of the body. - ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` - // The HTTP request/response body as raw binary. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - // Application specific response metadata. Must be set in the first response - // for streaming APIs. 
- Extensions []*any.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HttpBody) Reset() { *m = HttpBody{} } -func (m *HttpBody) String() string { return proto.CompactTextString(m) } -func (*HttpBody) ProtoMessage() {} -func (*HttpBody) Descriptor() ([]byte, []int) { - return fileDescriptor_09ea2ecaa32a0070, []int{0} -} - -func (m *HttpBody) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HttpBody.Unmarshal(m, b) -} -func (m *HttpBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HttpBody.Marshal(b, m, deterministic) -} -func (m *HttpBody) XXX_Merge(src proto.Message) { - xxx_messageInfo_HttpBody.Merge(m, src) -} -func (m *HttpBody) XXX_Size() int { - return xxx_messageInfo_HttpBody.Size(m) -} -func (m *HttpBody) XXX_DiscardUnknown() { - xxx_messageInfo_HttpBody.DiscardUnknown(m) -} - -var xxx_messageInfo_HttpBody proto.InternalMessageInfo - -func (m *HttpBody) GetContentType() string { - if m != nil { - return m.ContentType - } - return "" -} - -func (m *HttpBody) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *HttpBody) GetExtensions() []*any.Any { - if m != nil { - return m.Extensions - } - return nil -} - -func init() { - proto.RegisterType((*HttpBody)(nil), "google.api.HttpBody") -} - -func init() { proto.RegisterFile("google/api/httpbody.proto", fileDescriptor_09ea2ecaa32a0070) } - -var fileDescriptor_09ea2ecaa32a0070 = []byte{ - // 229 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0x31, 0x4f, 0xc3, 0x30, - 0x10, 0x85, 0xe5, 0xb6, 0x42, 0x70, 0x2d, 0x0c, 0x16, 0x43, 0x60, 0x0a, 0x4c, 0x99, 0x6c, 0x09, - 0xd8, 0x3a, 0x35, 0x0b, 0xb0, 0x45, 0x11, 0x13, 0x0b, 0x72, 0x1a, 0xe3, 0x46, 0x2a, 0x77, 0xa7, - 0xe6, 0x10, 0xf8, 0xef, 0xf0, 0x2b, 0x19, 0x11, 0x69, 0x2c, 0xe8, 0xf6, 0xe4, 0xef, 0x3d, 0xbf, - 0x77, 0x70, 0x11, 0x88, 0xc2, 0xd6, 0x5b, 0xc7, 0x9d, 0xdd, 0x88, 0x70, 0x43, 0x6d, 0x34, 0xbc, - 0x23, 0x21, 0x0d, 0x7b, 0x64, 0x1c, 0x77, 0x97, 0xc9, 0x36, 0x90, 0xe6, 0xfd, 0xd5, 0x3a, 0x1c, - 0x6d, 0xd7, 0x1f, 0x70, 0xfc, 0x20, 0xc2, 0x25, 0xb5, 0x51, 0x5f, 0xc1, 0x62, 0x4d, 0x28, 0x1e, - 0xe5, 0x45, 0x22, 0xfb, 0x4c, 0xe5, 0xaa, 0x38, 0xa9, 0xe7, 0xe3, 0xdb, 0x53, 0x64, 0xaf, 0x35, - 0xcc, 0x5a, 0x27, 0x2e, 0x9b, 0xe4, 0xaa, 0x58, 0xd4, 0x83, 0xd6, 0x77, 0x00, 0xfe, 0x53, 0x3c, - 0xf6, 0x1d, 0x61, 0x9f, 0x4d, 0xf3, 0x69, 0x31, 0xbf, 0x39, 0x37, 0x63, 0x7d, 0xaa, 0x34, 0x2b, - 0x8c, 0xf5, 0x3f, 0x5f, 0xb9, 0x81, 0xb3, 0x35, 0xbd, 0x99, 0xbf, 0x95, 0xe5, 0x69, 0x1a, 0x52, - 0xfd, 0x66, 0x2a, 0xf5, 0xbc, 0x1c, 0x61, 0xa0, 0xad, 0xc3, 0x60, 0x68, 0x17, 0x6c, 0xf0, 0x38, - 0xfc, 0x68, 0xf7, 0xc8, 0x71, 0xd7, 0x1f, 0x1c, 0xbf, 0x4c, 0xe2, 0x5b, 0xa9, 0xaf, 0xc9, 0xec, - 0x7e, 0x55, 0x3d, 0x36, 0x47, 0x43, 0xe2, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x78, 0xb9, 0x16, - 0x2b, 0x2d, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 0b9907f89b..7bfe37a3db 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,15 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// source: google/rpc/status.proto -package status +package status // import "google.golang.org/genproto/googleapis/rpc/status" -import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -20,27 +17,26 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// The `Status` type defines a logical error model that is suitable for -// different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). The error model is designed to be: +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. It is used by +// [gRPC](https://github.com/grpc). The error model is designed to be: // // - Simple to use and understand for most users // - Flexible enough to meet unexpected needs // // # Overview // -// The `Status` message contains three pieces of data: error code, error -// message, and error details. The error code should be an enum value of -// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes -// if needed. The error message should be a developer-facing English message -// that helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the error -// details or localize it in the client. The optional error details may contain -// arbitrary information about the error. There is a predefined set of error -// detail types in the package `google.rpc` that can be used for common error -// conditions. +// The `Status` message contains three pieces of data: error code, error message, +// and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The +// error message should be a developer-facing English message that helps +// developers *understand* and *resolve* the error. If a localized user-facing +// error message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain arbitrary +// information about the error. There is a predefined set of error detail types +// in the package `google.rpc` that can be used for common error conditions. // // # Language mapping // @@ -76,13 +72,11 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // - Logging. If some API errors are stored in logs, the message `Status` could // be used directly after any stripping needed for security/privacy reasons. type Status struct { - // The status code, which should be an enum value of - // [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. 
Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized - // by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. @@ -96,17 +90,16 @@ func (m *Status) Reset() { *m = Status{} } func (m *Status) String() string { return proto.CompactTextString(m) } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_24d244abaf643bfe, []int{0} + return fileDescriptor_status_c6e4de62dcdf2edf, []int{0} } - func (m *Status) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Status.Unmarshal(m, b) } func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Status.Marshal(b, m, deterministic) } -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) +func (dst *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(dst, src) } func (m *Status) XXX_Size() int { return xxx_messageInfo_Status.Size(m) @@ -142,9 +135,9 @@ func init() { proto.RegisterType((*Status)(nil), "google.rpc.Status") } -func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) } +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_c6e4de62dcdf2edf) } -var fileDescriptor_24d244abaf643bfe = []byte{ +var fileDescriptor_status_c6e4de62dcdf2edf = []byte{ // 209 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go deleted file mode 100644 index a0889f0c7a..0000000000 --- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go +++ /dev/null @@ -1,282 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/field_mask.proto - -package field_mask - -import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// `FieldMask` represents a set of symbolic field paths, for example: -// -// paths: "f.a" -// paths: "f.b.d" -// -// Here `f` represents a field in some root message, `a` and `b` -// fields in the message found in `f`, and `d` a field found in the -// message in `f.b`. -// -// Field masks are used to specify a subset of fields that should be -// returned by a get operation or modified by an update operation. -// Field masks also have a custom JSON encoding (see below). 
-// -// # Field Masks in Projections -// -// When used in the context of a projection, a response message or -// sub-message is filtered by the API to only contain those fields as -// specified in the mask. For example, if the mask in the previous -// example is applied to a response message as follows: -// -// f { -// a : 22 -// b { -// d : 1 -// x : 2 -// } -// y : 13 -// } -// z: 8 -// -// The result will not contain specific values for fields x,y and z -// (their value will be set to the default, and omitted in proto text -// output): -// -// -// f { -// a : 22 -// b { -// d : 1 -// } -// } -// -// A repeated field is not allowed except at the last position of a -// paths string. -// -// If a FieldMask object is not present in a get operation, the -// operation applies to all fields (as if a FieldMask of all fields -// had been specified). -// -// Note that a field mask does not necessarily apply to the -// top-level response message. In case of a REST get operation, the -// field mask applies directly to the response, but in case of a REST -// list operation, the mask instead applies to each individual message -// in the returned resource list. In case of a REST custom method, -// other definitions may be used. Where the mask applies will be -// clearly documented together with its declaration in the API. In -// any case, the effect on the returned resource/resources is required -// behavior for APIs. -// -// # Field Masks in Update Operations -// -// A field mask in update operations specifies which fields of the -// targeted resource are going to be updated. The API is required -// to only change the values of the fields as specified in the mask -// and leave the others untouched. If a resource is passed in to -// describe the updated values, the API ignores the values of all -// fields not covered by the mask. -// -// If a repeated field is specified for an update operation, new values will -// be appended to the existing repeated field in the target resource. Note that -// a repeated field is only allowed in the last position of a `paths` string. -// -// If a sub-message is specified in the last position of the field mask for an -// update operation, then new value will be merged into the existing sub-message -// in the target resource. -// -// For example, given the target message: -// -// f { -// b { -// d: 1 -// x: 2 -// } -// c: [1] -// } -// -// And an update message: -// -// f { -// b { -// d: 10 -// } -// c: [2] -// } -// -// then if the field mask is: -// -// paths: ["f.b", "f.c"] -// -// then the result will be: -// -// f { -// b { -// d: 10 -// x: 2 -// } -// c: [1, 2] -// } -// -// An implementation may provide options to override this default behavior for -// repeated and message fields. -// -// In order to reset a field's value to the default, the field must -// be in the mask and set to the default value in the provided resource. -// Hence, in order to reset all fields of a resource, provide a default -// instance of the resource and set all fields in the mask, or do -// not provide a mask as described below. -// -// If a field mask is not present on update, the operation applies to -// all fields (as if a field mask of all fields has been specified). -// Note that in the presence of schema evolution, this may mean that -// fields the client does not know and has therefore not filled into -// the request will be reset to their default. 
If this is unwanted -// behavior, a specific service may require a client to always specify -// a field mask, producing an error if not. -// -// As with get operations, the location of the resource which -// describes the updated values in the request message depends on the -// operation kind. In any case, the effect of the field mask is -// required to be honored by the API. -// -// ## Considerations for HTTP REST -// -// The HTTP kind of an update operation which uses a field mask must -// be set to PATCH instead of PUT in order to satisfy HTTP semantics -// (PUT must only be used for full updates). -// -// # JSON Encoding of Field Masks -// -// In JSON, a field mask is encoded as a single string where paths are -// separated by a comma. Fields name in each path are converted -// to/from lower-camel naming conventions. -// -// As an example, consider the following message declarations: -// -// message Profile { -// User user = 1; -// Photo photo = 2; -// } -// message User { -// string display_name = 1; -// string address = 2; -// } -// -// In proto a field mask for `Profile` may look as such: -// -// mask { -// paths: "user.display_name" -// paths: "photo" -// } -// -// In JSON, the same mask is represented as below: -// -// { -// mask: "user.displayName,photo" -// } -// -// # Field Masks and Oneof Fields -// -// Field masks treat fields in oneofs just as regular fields. Consider the -// following message: -// -// message SampleMessage { -// oneof test_oneof { -// string name = 4; -// SubMessage sub_message = 9; -// } -// } -// -// The field mask can be: -// -// mask { -// paths: "name" -// } -// -// Or: -// -// mask { -// paths: "sub_message" -// } -// -// Note that oneof type names ("test_oneof" in this case) cannot be used in -// paths. -// -// ## Field Mask Verification -// -// The implementation of any API method which has a FieldMask type field in the -// request should verify the included field paths, and return an -// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. -type FieldMask struct { - // The set of field mask paths. 
- Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldMask) Reset() { *m = FieldMask{} } -func (m *FieldMask) String() string { return proto.CompactTextString(m) } -func (*FieldMask) ProtoMessage() {} -func (*FieldMask) Descriptor() ([]byte, []int) { - return fileDescriptor_5158202634f0da48, []int{0} -} - -func (m *FieldMask) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldMask.Unmarshal(m, b) -} -func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) -} -func (m *FieldMask) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldMask.Merge(m, src) -} -func (m *FieldMask) XXX_Size() int { - return xxx_messageInfo_FieldMask.Size(m) -} -func (m *FieldMask) XXX_DiscardUnknown() { - xxx_messageInfo_FieldMask.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldMask proto.InternalMessageInfo - -func (m *FieldMask) GetPaths() []string { - if m != nil { - return m.Paths - } - return nil -} - -func init() { - proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") -} - -func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) } - -var fileDescriptor_5158202634f0da48 = []byte{ - // 175 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, - 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, - 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, - 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x3d, 0x8c, - 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x5a, 0x9d, 0xf8, 0xe0, 0x1a, 0x03, 0x40, 0x42, 0x01, - 0x8c, 0x51, 0x96, 0x50, 0x25, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, - 0xe9, 0xa9, 0x79, 0x60, 0x0d, 0xd8, 0xdc, 0x64, 0x8d, 0x60, 0xfe, 0x60, 0x64, 0x5c, 0xc4, 0xc4, - 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x48, 0x00, 0x54, 0x83, 0x5e, 0x78, 0x6a, - 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x24, - 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xda, 0xb7, 0xa8, 0xed, 0x00, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index 591343edfb..f443eec9a0 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -2,8 +2,6 @@ language: go matrix: include: - - go: 1.12.x - env: GO111MODULE=on - go: 1.11.x env: VET=1 GO111MODULE=on - go: 1.11.x diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index ca34e8aa0d..0863eb26b6 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -31,7 +31,6 @@ How to get your contributions merged smoothly and quickly. - `make vet` to catch vet errors - `make test` to run the tests - `make testrace` to run tests in race mode - - optional `make testappengine` to run tests with appengine - Exceptions to the rules can be made if there's a compelling reason for doing so. 
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index db982aabde..41a754f977 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -1,4 +1,4 @@ -all: vet test testrace +all: vet test testrace testappengine build: deps go build google.golang.org/grpc/... diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index f5eec6717f..e3fb3c75ae 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -20,7 +20,7 @@ gRPC-Go requires Go 1.9 or later. Constraints ----------- -The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. +The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. Documentation ------------- diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 97c6e2568f..fa31565fd2 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -17,7 +17,7 @@ */ // See internal/backoff package for the backoff implementation. This file is -// kept for the exported types and API backward compatibility. +// kept for the exported types and API backward compatility. package grpc diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index fafede238c..317c2e728e 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -61,7 +61,7 @@ func init() { } // Get returns the resolver builder registered with the given name. -// Note that the compare is done in a case-insensitive fashion. +// Note that the compare is done in a case-insenstive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { if b, ok := m[strings.ToLower(name)]; ok { @@ -127,7 +127,7 @@ type ClientConn interface { // The SubConn will be shutdown. RemoveSubConn(SubConn) - // UpdateBalancerState is called by balancer to notify gRPC that some internal + // UpdateBalancerState is called by balancer to nofity gRPC that some internal // state in balancer has changed. // // gRPC will update the connectivity state of the ClientConn, and will call pick @@ -171,6 +171,9 @@ type PickOptions struct { // FullMethodName is the method name that NewClientStream() is called // with. The canonical format is /service/Method. FullMethodName string + // Header contains the metadata from the RPC's client header. The metadata + // should not be modified; make a copy first if needed. + Header metadata.MD } // DoneInfo contains additional information for done. @@ -183,11 +186,6 @@ type DoneInfo struct { BytesSent bool // BytesReceived indicates if any byte has been received from the server. BytesReceived bool - // ServerLoad is the load received from server. It's usually sent as part of - // trailing metadata. - // - // The only supported type now is *orca_v1.LoadReport. 
- ServerLoad interface{} } var ( @@ -217,10 +215,8 @@ type Picker interface { // // If a SubConn is returned: // - If it is READY, gRPC will send the RPC on it; - // - If it is not ready, or becomes not ready after it's returned, gRPC will - // block until UpdateBalancerState() is called and will call pick on the - // new picker. The done function returned from Pick(), if not nil, will be - // called with nil error, no bytes sent and no bytes received. + // - If it is not ready, or becomes not ready after it's returned, gRPC will block + // until UpdateBalancerState() is called and will call pick on the new picker. // // If the returned error is not nil: // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() @@ -231,10 +227,9 @@ type Picker interface { // - Else (error is other non-nil error): // - The RPC will fail with unavailable error. // - // The returned done() function will be called once the rpc has finished, - // with the final status of that RPC. If the SubConn returned is not a - // valid SubConn type, done may not be called. done may be nil if balancer - // doesn't care about the RPC status. + // The returned done() function will be called once the rpc has finished, with the + // final status of that RPC. + // done may be nil if balancer doesn't care about the RPC status. Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) } @@ -253,46 +248,18 @@ type Balancer interface { // that back to gRPC. // Balancer should also generate and update Pickers when its internal state has // been changed by the new state. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateSubConnState will be called instead. HandleSubConnStateChange(sc SubConn, state connectivity.State) // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to // balancers. // Balancer can create new SubConn or remove SubConn with the addresses. // An empty address slice and a non-nil error will be passed if the resolver returns // non-nil error to gRPC. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateResolverState will be called instead. HandleResolvedAddrs([]resolver.Address, error) // Close closes the balancer. The balancer is not required to call // ClientConn.RemoveSubConn for its existing SubConns. Close() } -// SubConnState describes the state of a SubConn. -type SubConnState struct { - ConnectivityState connectivity.State - // TODO: add last connection error -} - -// V2Balancer is defined for documentation purposes. If a Balancer also -// implements V2Balancer, its UpdateResolverState method will be called instead -// of HandleResolvedAddrs and its UpdateSubConnState will be called instead of -// HandleSubConnStateChange. -type V2Balancer interface { - // UpdateResolverState is called by gRPC when the state of the resolver - // changes. - UpdateResolverState(resolver.State) - // UpdateSubConnState is called by gRPC when the state of a SubConn - // changes. - UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. - Close() -} - // ConnectivityStateEvaluator takes the connectivity states of multiple SubConns // and returns one aggregated connectivity state. 
// diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index c5a51bd1d9..245785e7a2 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -67,16 +67,14 @@ type baseBalancer struct { } func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - panic("not implemented") -} - -func (b *baseBalancer) UpdateResolverState(s resolver.State) { - // TODO: handle s.Err (log if not nil) once implemented. - // TODO: handle s.ServiceConfig? - grpclog.Infoln("base.baseBalancer: got new resolver state: ", s) + if err != nil { + grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err) + return + } + grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs) // addrsSet is the set converted from addrs, it's used for quick lookup of an address. addrsSet := make(map[resolver.Address]struct{}) - for _, a := range s.Addresses { + for _, a := range addrs { addrsSet[a] = struct{}{} if _, ok := b.subConns[a]; !ok { // a is a new address (not existing in b.subConns). @@ -122,11 +120,6 @@ func (b *baseBalancer) regeneratePicker() { } func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - panic("not implemented") -} - -func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - s := state.ConnectivityState grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) oldS, ok := b.scStates[sc] if !ok { diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 29f7a4ddd6..57aea9fb4d 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -28,7 +28,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" ) @@ -48,19 +47,12 @@ type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs) - if len(readySCs) == 0 { - return base.NewErrPicker(balancer.ErrNoSubConnAvailable) - } var scs []balancer.SubConn for _, sc := range readySCs { scs = append(scs, sc) } return &rrPicker{ subConns: scs, - // Start at a random index, as the same RR balancer rebuilds a new - // picker when SubConn states change, and we don't want to apply excess - // load to the first server in the list. 
- next: grpcrand.Intn(len(scs)), } } @@ -75,6 +67,10 @@ type rrPicker struct { } func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if len(p.subConns) <= 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + p.mu.Lock() sc := p.subConns[p.next] p.next = (p.next + 1) % len(p.subConns) diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index bc965f0aca..7233ade293 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -82,13 +82,20 @@ func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { return b.c } +// resolverUpdate contains the new resolved addresses or error if there's +// any. +type resolverUpdate struct { + addrs []resolver.Address + err error +} + // ccBalancerWrapper is a wrapper on top of cc for balancers. // It implements balancer.ClientConn interface. type ccBalancerWrapper struct { cc *ClientConn balancer balancer.Balancer stateChangeQueue *scStateUpdateBuffer - resolverUpdateCh chan *resolver.State + resolverUpdateCh chan *resolverUpdate done chan struct{} mu sync.Mutex @@ -99,7 +106,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui ccb := &ccBalancerWrapper{ cc: cc, stateChangeQueue: newSCStateUpdateBuffer(), - resolverUpdateCh: make(chan *resolver.State, 1), + resolverUpdateCh: make(chan *resolverUpdate, 1), done: make(chan struct{}), subConns: make(map[*acBalancerWrapper]struct{}), } @@ -121,23 +128,15 @@ func (ccb *ccBalancerWrapper) watcher() { return default: } - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state}) - } else { - ccb.balancer.HandleSubConnStateChange(t.sc, t.state) - } - case s := <-ccb.resolverUpdateCh: + ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + case t := <-ccb.resolverUpdateCh: select { case <-ccb.done: ccb.balancer.Close() return default: } - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateResolverState(*s) - } else { - ccb.balancer.HandleResolvedAddrs(s.Addresses, nil) - } + ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) case <-ccb.done: } @@ -178,23 +177,37 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } -func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) { +func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { if ccb.cc.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue + var containsGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + containsGRPCLB = true + break } - i++ + } + if containsGRPCLB { + // The current balancer is not grpclb, but addresses contain grpclb + // address. This means we failed to switch to grpclb, most likely + // because grpclb is not registered. Filter out all grpclb addresses + // from addrs before sending to balancer. 
+ tempAddrs := make([]resolver.Address, 0, len(addrs)) + for _, a := range addrs { + if a.Type != resolver.GRPCLB { + tempAddrs = append(tempAddrs, a) + } + } + addrs = tempAddrs } } select { case <-ccb.resolverUpdateCh: default: } - ccb.resolverUpdateCh <- &s + ccb.resolverUpdateCh <- &resolverUpdate{ + addrs: addrs, + err: err, + } } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go index 29bda6353d..42b60feaa6 100644 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -281,8 +281,9 @@ func (bw *balancerWrapper) Close() { } // The picker is the balancerWrapper itself. +// Pick should never return ErrNoSubConnAvailable. // It either blocks or returns error, consistent with v1 balancer Get(). -func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) { +func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { failfast := true // Default failfast is true. if ss, ok := rpcInfoFromContext(ctx); ok { failfast = ss.failfast @@ -291,51 +292,35 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) if err != nil { return nil, nil, err } + var done func(balancer.DoneInfo) if p != nil { - done = func(balancer.DoneInfo) { p() } - defer func() { - if err != nil { - p() - } - }() + done = func(i balancer.DoneInfo) { p() } } - + var sc balancer.SubConn bw.mu.Lock() defer bw.mu.Unlock() if bw.pickfirst { // Get the first sc in conns. - for _, sc := range bw.conns { - return sc, done, nil + for _, sc = range bw.conns { + break + } + } else { + var ok bool + sc, ok = bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + if !ok && failfast { + return nil, nil, balancer.ErrTransientFailure + } + if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { + // If the returned sc is not ready and RPC is failfast, + // return error, and this RPC will fail. + return nil, nil, balancer.ErrTransientFailure } - return nil, nil, balancer.ErrNoSubConnAvailable - } - sc, ok1 := bw.conns[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, - ServerName: "", - Metadata: a.Metadata, - }] - s, ok2 := bw.connSt[sc] - if !ok1 || !ok2 { - // This can only happen due to a race where Get() returned an address - // that was subsequently removed by Notify. In this case we should - // retry always. - return nil, nil, balancer.ErrNoSubConnAvailable - } - switch s.s { - case connectivity.Ready, connectivity.Idle: - return sc, done, nil - case connectivity.Shutdown, connectivity.TransientFailure: - // If the returned sc has been shut down or is in transient failure, - // return error, and this RPC will fail or wait for another picker (if - // non-failfast). - return nil, nil, balancer.ErrTransientFailure - default: - // For other states (connecting or unknown), the v1 balancer would - // traditionally wait until ready and then issue the RPC. Returning - // ErrNoSubConnAvailable will be a slight improvement in that it will - // allow the balancer to choose another address in case others are - // connected. 
- return nil, nil, balancer.ErrNoSubConnAvailable } + + return sc, done, nil } diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 9e20e4d385..100f05dc74 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -40,7 +40,7 @@ func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply int func combine(o1 []CallOption, o2 []CallOption) []CallOption { // we don't use append because o1 could have extra capacity whose // elements would be overwritten, which could cause inadvertent - // sharing (and race conditions) between concurrent calls + // sharing (and race connditions) between concurrent calls if len(o1) == 0 { return o2 } else if len(o2) == 0 { diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index bd2d2b3177..56d0bf713d 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -42,6 +42,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. @@ -68,9 +69,11 @@ var ( errConnClosing = errors.New("grpc: the connection is closing") // errBalancerClosed indicates that the balancer is closed. errBalancerClosed = errors.New("grpc: balancer is closed") - // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default - // service config. - invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // We use an accessor so that minConnectTimeout can be + // atomically read and updated while testing. + getMinConnectTimeout = func() time.Duration { + return minConnectTimeout + } ) // The following errors are returned from Dial and DialContext @@ -83,7 +86,7 @@ var ( // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., OAuth2 token) which requires secure connection on an insecure + // information (e.g., oauth2 token) which requires secure connection on an insecure // connection. 
errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") // errCredentialsConflict indicates that grpc.WithTransportCredentials() @@ -137,12 +140,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * opt.apply(&cc.dopts) } - defer func() { - if err != nil { - cc.Close() - } - }() - if channelz.IsOn() { if cc.dopts.channelzParentID != 0 { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) @@ -182,13 +179,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } } - if cc.dopts.defaultServiceConfigRawJSON != nil { - sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) - if err != nil { - return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err) - } - cc.dopts.defaultServiceConfig = sc - } cc.mkp = cc.dopts.copts.KeepaliveParams if cc.dopts.copts.Dialer == nil { @@ -211,12 +201,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) defer cancel() } + defer func() { select { case <-ctx.Done(): conn, err = nil, ctx.Err() default: } + + if err != nil { + cc.Close() + } }() scSet := false @@ -225,7 +220,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * select { case sc, ok := <-cc.dopts.scChan: if ok { - cc.sc = &sc + cc.sc = sc scSet = true } default: @@ -242,9 +237,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) if cc.dopts.resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is + // If resolver builder is still nil, the parse target's scheme is // not registered. Fallback to default resolver and set Endpoint to - // the original target. + // the original unparsed target. grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) cc.parsedTarget = resolver.Target{ Scheme: resolver.GetDefaultScheme(), @@ -271,7 +266,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * select { case sc, ok := <-cc.dopts.scChan: if ok { - cc.sc = &sc + cc.sc = sc } case <-ctx.Done(): return nil, ctx.Err() @@ -393,11 +388,14 @@ type ClientConn struct { mu sync.RWMutex resolverWrapper *ccResolverWrapper - sc *ServiceConfig + sc ServiceConfig + scRaw string conns map[*addrConn]struct{} // Keepalive parameter can be updated if a GoAway is received. mkp keepalive.ClientParameters curBalancerName string + preBalancerName string // previous balancer name. + curAddresses []resolver.Address balancerWrapper *ccBalancerWrapper retryThrottler atomic.Value @@ -438,8 +436,9 @@ func (cc *ClientConn) scWatcher() { } cc.mu.Lock() // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. - cc.sc = &sc + // We may revist this decision in the future. + cc.sc = sc + cc.scRaw = "" cc.mu.Unlock() case <-cc.ctx.Done(): return @@ -466,72 +465,50 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { } } -// gRPC should resort to default service config when: -// * resolver service config is disabled -// * or, resolver does not return a service config or returns an invalid one. 
-func (cc *ClientConn) fallbackToDefaultServiceConfig(sc string) bool { - if cc.dopts.disableServiceConfig { - return true - } - // The logic below is temporary, will be removed once we change the resolver.State ServiceConfig field type. - // Right now, we assume that empty service config string means resolver does not return a config. - if sc == "" { - return true - } - // TODO: the logic below is temporary. Once we finish the logic to validate service config - // in resolver, we will replace the logic below. - _, err := parseServiceConfig(sc) - return err != nil -} - -func (cc *ClientConn) updateResolverState(s resolver.State) error { +func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) { cc.mu.Lock() defer cc.mu.Unlock() - // Check if the ClientConn is already closed. Some fields (e.g. - // balancerWrapper) are set to nil when closing the ClientConn, and could - // cause nil pointer panic if we don't have this check. if cc.conns == nil { - return nil + // cc was closed. + return } - if cc.fallbackToDefaultServiceConfig(s.ServiceConfig) { - if cc.dopts.defaultServiceConfig != nil && cc.sc == nil { - cc.applyServiceConfig(cc.dopts.defaultServiceConfig) - } - } else { - // TODO: the parsing logic below will be moved inside resolver. - sc, err := parseServiceConfig(s.ServiceConfig) - if err != nil { - return err - } - if cc.sc == nil || cc.sc.rawJSONString != s.ServiceConfig { - cc.applyServiceConfig(sc) - } + if reflect.DeepEqual(cc.curAddresses, addrs) { + return } - // update the service config that will be sent to balancer. - if cc.sc != nil { - s.ServiceConfig = cc.sc.rawJSONString - } + cc.curAddresses = addrs + cc.firstResolveEvent.Fire() if cc.dopts.balancerBuilder == nil { // Only look at balancer types and switch balancer if balancer dial // option is not set. var isGRPCLB bool - for _, a := range s.Addresses { + for _, a := range addrs { if a.Type == resolver.GRPCLB { isGRPCLB = true break } } var newBalancerName string - // TODO: use new loadBalancerConfig field with appropriate priority. if isGRPCLB { newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB } else { - newBalancerName = PickFirstBalancerName + // Address list doesn't contain grpclb address. Try to pick a + // non-grpclb balancer. + newBalancerName = cc.curBalancerName + // If current balancer is grpclb, switch to the previous one. + if newBalancerName == grpclbName { + newBalancerName = cc.preBalancerName + } + // The following could be true in two cases: + // - the first time handling resolved addresses + // (curBalancerName="") + // - the first time handling non-grpclb addresses + // (curBalancerName="grpclb", preBalancerName="") + if newBalancerName == "" { + newBalancerName = PickFirstBalancerName + } } cc.switchBalancer(newBalancerName) } else if cc.balancerWrapper == nil { @@ -540,9 +517,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error { cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) } - cc.balancerWrapper.updateResolverState(s) - cc.firstResolveEvent.Fire() - return nil + cc.balancerWrapper.handleResolvedAddrs(addrs, nil) } // switchBalancer starts the switching from current balancer to the balancer @@ -554,6 +529,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error { // // Caller must hold cc.mu. 
func (cc *ClientConn) switchBalancer(name string) { + if cc.conns == nil { + return + } + if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) { return } @@ -563,11 +542,15 @@ func (cc *ClientConn) switchBalancer(name string) { grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") return } + // TODO(bar switching) change this to two steps: drain and close. + // Keep track of sc in wrapper. if cc.balancerWrapper != nil { cc.balancerWrapper.close() } builder := balancer.Get(name) + // TODO(yuxuanli): If user send a service config that does not contain a valid balancer name, should + // we reuse previous one? if channelz.IsOn() { if builder == nil { channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ @@ -586,6 +569,7 @@ func (cc *ClientConn) switchBalancer(name string) { builder = newPickfirstBuilder() } + cc.preBalancerName = cc.curBalancerName cc.curBalancerName = builder.Name() cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) } @@ -748,9 +732,6 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { // TODO: Avoid the locking here. cc.mu.RLock() defer cc.mu.RUnlock() - if cc.sc == nil { - return MethodConfig{} - } m, ok := cc.sc.Methods[method] if !ok { i := strings.LastIndex(method, "/") @@ -762,15 +743,14 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { cc.mu.RLock() defer cc.mu.RUnlock() - if cc.sc == nil { - return nil - } return cc.sc.healthCheckConfig } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + hdr, _ := metadata.FromOutgoingContext(ctx) t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{ FullMethodName: method, + Header: hdr, }) if err != nil { return nil, nil, toRPCErr(err) @@ -778,25 +758,65 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st return t, done, nil } -func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error { - if sc == nil { - // should never reach here. - return fmt.Errorf("got nil pointer for service config") +// handleServiceConfig parses the service config string in JSON format to Go native +// struct ServiceConfig, and store both the struct and the JSON string in ClientConn. +func (cc *ClientConn) handleServiceConfig(js string) error { + if cc.dopts.disableServiceConfig { + return nil + } + if cc.scRaw == js { + return nil + } + if channelz.IsOn() { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + // The special formatting of \"%s\" instead of %q is to provide nice printing of service config + // for human consumption. + Desc: fmt.Sprintf("Channel has a new service config \"%s\"", js), + Severity: channelz.CtINFO, + }) + } + sc, err := parseServiceConfig(js) + if err != nil { + return err } + cc.mu.Lock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. 
+ if cc.conns == nil { + cc.mu.Unlock() + return nil + } + cc.scRaw = js cc.sc = sc - if cc.sc.retryThrottling != nil { + if sc.retryThrottling != nil { newThrottler := &retryThrottler{ - tokens: cc.sc.retryThrottling.MaxTokens, - max: cc.sc.retryThrottling.MaxTokens, - thresh: cc.sc.retryThrottling.MaxTokens / 2, - ratio: cc.sc.retryThrottling.TokenRatio, + tokens: sc.retryThrottling.MaxTokens, + max: sc.retryThrottling.MaxTokens, + thresh: sc.retryThrottling.MaxTokens / 2, + ratio: sc.retryThrottling.TokenRatio, } cc.retryThrottler.Store(newThrottler) } else { cc.retryThrottler.Store((*retryThrottler)(nil)) } + if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config. + if cc.curBalancerName == grpclbName { + // If current balancer is grpclb, there's at least one grpclb + // balancer address in the resolved list. Don't switch the balancer, + // but change the previous balancer name, so if a new resolved + // address list doesn't contain grpclb address, balancer will be + // switched to *sc.LB. + cc.preBalancerName = *sc.LB + } else { + cc.switchBalancer(*sc.LB) + cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil) + } + } + + cc.mu.Unlock() return nil } @@ -872,7 +892,7 @@ func (cc *ClientConn) Close() error { } channelz.AddTraceEvent(cc.channelzID, ted) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. + // the entity beng deleted, and thus prevent it from being deleted right away. channelz.RemoveEntry(cc.channelzID) } return nil @@ -901,11 +921,14 @@ type addrConn struct { // Use updateConnectivityState for updating addrConn's connectivity state. state connectivity.State + tearDownErr error // The reason this addrConn is torn down. + backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. - czData *channelzData + channelzID int64 // channelz unique identification number. + czData *channelzData + healthCheckEnabled bool } // Note: this requires a lock on ac.mu. @@ -915,6 +938,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State) { } updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) + grpclog.Infof(updateMsg) ac.state = s if channelz.IsOn() { channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ @@ -941,169 +965,192 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { func (ac *addrConn) resetTransport() { for i := 0; ; i++ { - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOption{}) - } + tryNextAddrFromStart := grpcsync.NewEvent() ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return + if i > 0 { + ac.cc.resolveNow(resolver.ResolveNowOption{}) } - addrs := ac.addrs backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() - } - - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor - } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. 
- // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) ac.mu.Unlock() - newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) - if err != nil { - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + addrLoop: + for _, addr := range addrs { ac.mu.Lock() + if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } - ac.updateConnectivityState(connectivity.TransientFailure) + ac.updateConnectivityState(connectivity.Connecting) + ac.transport = nil + ac.mu.Unlock() - // Backoff. - b := ac.resetBackoff + // This will be the duration that dial gets to finish. + dialDuration := getMinConnectTimeout() + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + connectDeadline := time.Now().Add(dialDuration) + + ac.mu.Lock() + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + hctx, hcancel := context.WithCancel(ac.ctx) + defer hcancel() ac.mu.Unlock() - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), + Severity: channelz.CtINFO, + }) + } + + reconnect := grpcsync.NewEvent() + prefaceReceived := make(chan struct{}) + newTr, err := ac.createTransport(addr, copts, connectDeadline, reconnect, prefaceReceived) + if err == nil { ac.mu.Lock() - ac.backoffIdx++ + ac.curAddr = addr + ac.transport = newTr ac.mu.Unlock() - case <-b: - timer.Stop() - case <-ac.ctx.Done(): - timer.Stop() - return + + healthCheckConfig := ac.cc.healthCheckConfig() + // LB channel health checking is only enabled when all the four requirements below are met: + // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption, + // 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package, + // 3. a service config with non-empty healthCheckConfig field is provided, + // 4. the current load balancer allows it. + healthcheckManagingState := false + if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled { + if ac.cc.dopts.healthCheckFunc == nil { + // TODO: add a link to the health check doc in the error message. + grpclog.Error("the client side LB channel health check function has not been set.") + } else { + // TODO(deklerk) refactor to just return transport + go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName) + healthcheckManagingState = true + } + } + if !healthcheckManagingState { + ac.mu.Lock() + ac.updateConnectivityState(connectivity.Ready) + ac.mu.Unlock() + } + } else { + hcancel() + if err == errConnClosing { + return + } + + if tryNextAddrFromStart.HasFired() { + break addrLoop + } + continue } - continue - } - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - newTr.Close() + ac.mu.Lock() + reqHandshake := ac.dopts.reqHandshake ac.mu.Unlock() - return - } - ac.curAddr = addr - ac.transport = newTr - ac.backoffIdx = 0 - - healthCheckConfig := ac.cc.healthCheckConfig() - // LB channel health checking is only enabled when all the four requirements below are met: - // 1. 
it is not disabled by the user with the WithDisableHealthCheck DialOption, - // 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package, - // 3. a service config with non-empty healthCheckConfig field is provided, - // 4. the current load balancer allows it. - hctx, hcancel := context.WithCancel(ac.ctx) - healthcheckManagingState := false - if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled { - if ac.cc.dopts.healthCheckFunc == nil { - // TODO: add a link to the health check doc in the error message. - grpclog.Error("the client side LB channel health check function has not been set.") + + <-reconnect.Done() + hcancel() + + if reqHandshake == envconfig.RequireHandshakeHybrid { + // In RequireHandshakeHybrid mode, we must check to see whether + // server preface has arrived yet to decide whether to start + // reconnecting at the top of the list (server preface received) + // or continue with the next addr in the list as if the + // connection were not successful (server preface not received). + select { + case <-prefaceReceived: + // We received a server preface - huzzah! We consider this + // a success and restart from the top of the addr list. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() + break addrLoop + default: + // Despite having set state to READY, in hybrid mode we + // consider this a failure and continue connecting at the + // next addr in the list. + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + ac.updateConnectivityState(connectivity.TransientFailure) + ac.mu.Unlock() + + if tryNextAddrFromStart.HasFired() { + break addrLoop + } + } } else { - // TODO(deklerk) refactor to just return transport - go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName) - healthcheckManagingState = true + // In RequireHandshakeOn mode, we would have already waited for + // the server preface, so we consider this a success and restart + // from the top of the addr list. In RequireHandshakeOff mode, + // we don't care to wait for the server preface before + // considering this a success, so we also restart from the top + // of the addr list. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() + break addrLoop } } - if !healthcheckManagingState { - ac.updateConnectivityState(connectivity.Ready) - } - ac.mu.Unlock() - // Block until the created transport is down. And when this happens, - // we restart from the top of the addr list. - <-reconnect.Done() - hcancel() - - // Need to reconnect after a READY, the addrConn enters - // TRANSIENT_FAILURE. - // - // This will set addrConn to TRANSIENT_FAILURE for a very short period - // of time, and turns CONNECTING. It seems reasonable to skip this, but - // READY-CONNECTING is not a valid transition. + // After exhausting all addresses, or after need to reconnect after a + // READY, the addrConn enters TRANSIENT_FAILURE. ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } ac.updateConnectivityState(connectivity.TransientFailure) - ac.mu.Unlock() - } -} - -// tryAllAddrs tries to creates a connection to the addresses, and stop when at the -// first successful one. It returns the transport, the address and a Event in -// the successful case. The Event fires when the returned transport disconnects. 
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { - for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return nil, resolver.Address{}, nil, errConnClosing - } - ac.updateConnectivityState(connectivity.Connecting) - ac.transport = nil - - ac.cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = ac.cc.mkp - ac.cc.mu.RUnlock() - copts := ac.dopts.copts - if ac.scopts.CredsBundle != nil { - copts.CredsBundle = ac.scopts.CredsBundle - } + // Backoff. + b := ac.resetBackoff + timer := time.NewTimer(backoffFor) + acctx := ac.ctx ac.mu.Unlock() - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), - Severity: channelz.CtINFO, - }) - } - - newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) - if err == nil { - return newTr, addr, reconnect, nil + select { + case <-timer.C: + ac.mu.Lock() + ac.backoffIdx++ + ac.mu.Unlock() + case <-b: + timer.Stop() + case <-acctx.Done(): + timer.Stop() + return } - ac.cc.blockingpicker.updateConnectionError(err) } - - // Couldn't connect to any address. - return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address") } -// createTransport creates a connection to addr. It returns the transport and a -// Event in the successful case. The Event fires when the returned transport -// disconnects. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { - prefaceReceived := make(chan struct{}) +// createTransport creates a connection to one of the backends in addrs. It +// sets ac.transport in the success case, or it returns an error if it was +// unable to successfully create a transport. +// +// If waitForHandshake is enabled, it blocks until server preface arrives. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time, reconnect *grpcsync.Event, prefaceReceived chan struct{}) (transport.ClientTransport, error) { onCloseCalled := make(chan struct{}) - reconnect := grpcsync.NewEvent() target := transport.TargetInfo{ Addr: addr.Addr, @@ -1111,6 +1158,8 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne Authority: ac.cc.authority, } + prefaceTimer := time.NewTimer(connectDeadline.Sub(time.Now())) + onGoAway := func(r transport.GoAwayReason) { ac.mu.Lock() ac.adjustParams(r) @@ -1120,11 +1169,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne onClose := func() { close(onCloseCalled) + prefaceTimer.Stop() reconnect.Fire() } onPrefaceReceipt := func() { close(prefaceReceived) + prefaceTimer.Stop() } connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) @@ -1134,28 +1185,69 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne } newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) + + if err == nil { + if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn { + select { + case <-prefaceTimer.C: + // We didn't get the preface in time. + newTr.Close() + err = errors.New("timed out waiting for server handshake") + case <-prefaceReceived: + // We got the preface - huzzah! things are good. 
+ case <-onCloseCalled: + // The transport has already closed - noop. + return nil, errors.New("connection closed") + } + } else if ac.dopts.reqHandshake == envconfig.RequireHandshakeHybrid { + go func() { + select { + case <-prefaceTimer.C: + // We didn't get the preface in time. + newTr.Close() + case <-prefaceReceived: + // We got the preface just in the nick of time - huzzah! + case <-onCloseCalled: + // The transport has already closed - noop. + } + }() + } + } + if err != nil { // newTr is either nil, or closed. + ac.cc.blockingpicker.updateConnectionError(err) + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + + return nil, errConnClosing + } + ac.mu.Unlock() grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) - return nil, nil, err + return nil, err } - if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn { - select { - case <-time.After(connectDeadline.Sub(time.Now())): - // We didn't get the preface in time. - newTr.Close() - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: - // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. - } + // Now there is a viable transport to be use, so set ac.transport to reflect the new viable transport. + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + newTr.Close() + return nil, errConnClosing } - return newTr, reconnect, nil + ac.mu.Unlock() + + // Now there is a viable transport to be use, so set ac.transport to reflect the new viable transport. + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + newTr.Close() + return nil, errConnClosing + } + ac.mu.Unlock() + + return newTr, nil } func (ac *addrConn) startHealthCheck(ctx context.Context, newTr transport.ClientTransport, addr resolver.Address, serviceName string) { @@ -1243,6 +1335,7 @@ func (ac *addrConn) tearDown(err error) { // between setting the state and logic that waits on context cancelation / etc. ac.updateConnectivityState(connectivity.Shutdown) ac.cancel() + ac.tearDownErr = err ac.curAddr = resolver.Address{} if err == errConnDrain && curTr != nil { // GracefulClose(...) may be executed multiple times when diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go index 34ec36fbf6..b1d7dbc547 100644 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -52,7 +52,7 @@ func (s State) String() string { const ( // Idle indicates the ClientConn is idle. Idle State = iota - // Connecting indicates the ClientConn is connecting. + // Connecting indicates the ClienConn is connecting. Connecting // Ready indicates the ClientConn is ready for work. 
Ready diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 88aff94596..a851560456 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -36,6 +36,9 @@ import ( "google.golang.org/grpc/credentials/internal" ) +// alpnProtoStr are the specified application level protocols for gRPC. +var alpnProtoStr = []string{"h2"} + // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { @@ -205,23 +208,10 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } -const alpnProtoStrH2 = "h2" - -func appendH2ToNextProtos(ps []string) []string { - for _, p := range ps { - if p == alpnProtoStrH2 { - return ps - } - } - ret := make([]string, 0, len(ps)+1) - ret = append(ret, ps...) - return append(ret, alpnProtoStrH2) -} - // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{cloneTLSConfig(c)} - tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + tc.config.NextProtos = alpnProtoStr return tc } diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/credentials/tls13.go deleted file mode 100644 index ccbf35b331..0000000000 --- a/vendor/google.golang.org/grpc/credentials/tls13.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build go1.12 - -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import "crypto/tls" - -// This init function adds cipher suite constants only defined in Go 1.12. -func init() { - cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" - cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" - cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index e114fecbb7..f286462449 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" @@ -55,16 +54,13 @@ type dialOptions struct { // balancer, and also by WithBalancerName dial option. balancerBuilder balancer.Builder // This is to support grpclb. 
- resolverBuilder resolver.Builder - reqHandshake envconfig.RequireHandshakeSetting - channelzParentID int64 - disableServiceConfig bool - disableRetry bool - disableHealthCheck bool - healthCheckFunc internal.HealthChecker - minConnectTimeout func() time.Duration - defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. - defaultServiceConfigRawJSON *string + resolverBuilder resolver.Builder + reqHandshake envconfig.RequireHandshakeSetting + channelzParentID int64 + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker } // DialOption configures how we set up the connection. @@ -168,7 +164,7 @@ func WithDefaultCallOptions(cos ...CallOption) DialOption { // WithCodec returns a DialOption which sets a codec for message marshaling and // unmarshaling. // -// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. +// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead. func WithCodec(c Codec) DialOption { return WithDefaultCallOptions(CallCustomCodec(c)) } @@ -332,17 +328,14 @@ func WithTimeout(d time.Duration) DialOption { }) } -// WithContextDialer returns a DialOption that sets a dialer to create -// connections. If FailOnNonTempDialError() is set to true, and an error is -// returned by f, gRPC checks the error's Temporary() method to decide if it -// should try to reconnect to the network address. -func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { +func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f }) } func init() { + internal.WithContextDialer = withContextDialer internal.WithResolverBuilder = withResolverBuilder internal.WithHealthCheckFunc = withHealthCheckFunc } @@ -351,13 +344,11 @@ func init() { // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. -// -// Deprecated: use WithContextDialer instead func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { - return WithContextDialer( + return withContextDialer( func(ctx context.Context, addr string) (net.Conn, error) { if deadline, ok := ctx.Deadline(); ok { - return f(addr, time.Until(deadline)) + return f(addr, deadline.Sub(time.Now())) } return f(addr, 0) }) @@ -397,10 +388,6 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { - if kp.Time < internal.KeepaliveMinPingTime { - grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) - kp.Time = internal.KeepaliveMinPingTime - } return newFuncDialOption(func(o *dialOptions) { o.copts.KeepaliveParams = kp }) @@ -443,27 +430,12 @@ func WithChannelzParentID(id int64) DialOption { // WithDisableServiceConfig returns a DialOption that causes grpc to ignore any // service config provided by the resolver and provides a hint to the resolver // to not fetch service configs. -// -// Note that, this dial option only disables service config from resolver. If -// default service config is provided, grpc will use the default service config. 
func WithDisableServiceConfig() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableServiceConfig = true }) } -// WithDefaultServiceConfig returns a DialOption that configures the default -// service config, which will be used in cases where: -// 1. WithDisableServiceConfig is called. -// 2. Resolver does not return service config or if the resolver gets and invalid config. -// -// This API is EXPERIMENTAL. -func WithDefaultServiceConfig(s string) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.defaultServiceConfigRawJSON = &s - }) -} - // WithDisableRetry returns a DialOption that disables retries, even if the // service config enables them. This does not impact transparent retries, which // will happen automatically if no data is written to the wire or if the RPC is @@ -488,8 +460,7 @@ func WithMaxHeaderListSize(s uint32) DialOption { }) } -// WithDisableHealthCheck disables the LB channel health checking for all -// SubConns of this ClientConn. +// WithDisableHealthCheck disables the LB channel health checking for all SubConns of this ClientConn. // // This API is EXPERIMENTAL. func WithDisableHealthCheck() DialOption { @@ -498,8 +469,8 @@ func WithDisableHealthCheck() DialOption { }) } -// withHealthCheckFunc replaces the default health check function with the -// provided one. It makes tests easier to change the health check function. +// withHealthCheckFunc replaces the default health check function with the provided one. It makes +// tests easier to change the health check function. // // For testing purpose only. func withHealthCheckFunc(f internal.HealthChecker) DialOption { @@ -519,14 +490,3 @@ func defaultDialOptions() dialOptions { }, } } - -// withGetMinConnectDeadline specifies the function that clientconn uses to -// get minConnectDeadline. This can be used to make connection attempts happen -// faster/slower. -// -// For testing purpose only. 
-func withMinConnectDeadline(f func() time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.minConnectTimeout = f - }) -} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 30a75da99d..ade8b7cec7 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -102,10 +102,10 @@ func RegisterCodec(codec Codec) { if codec == nil { panic("cannot register a nil Codec") } - if codec.Name() == "" { - panic("cannot register Codec with empty string result for Name()") - } contentSubtype := strings.ToLower(codec.Name()) + if contentSubtype == "" { + panic("cannot register Codec with empty string result for String()") + } registeredCodecs[contentSubtype] = codec } diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index 9f3ef3a539..f296dcf4e6 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -2,18 +2,19 @@ module google.golang.org/grpc require ( cloud.google.com/go v0.26.0 // indirect - github.com/BurntSushi/toml v0.3.1 // indirect github.com/client9/misspell v0.3.4 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/mock v1.1.1 github.com/golang/protobuf v1.2.0 - golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 - golang.org/x/net v0.0.0-20190311183353-d8887717615a + github.com/kisielk/gotool v1.0.0 // indirect + golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect - golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a - golang.org/x/tools v0.0.0-20190311212946-11955173bddd + golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 + golang.org/x/text v0.3.0 // indirect + golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52 google.golang.org/appengine v1.1.0 // indirect google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 - honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099 + honnef.co/go/tools v0.0.0-20180728063816-88497007e858 ) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index b8638ce769..bfb6bb7c0e 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -1,7 +1,5 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -10,24 +8,25 @@ github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 h1:x/bBzNauLQAlE3fLku/xy92Y8QwKX5HZymrMz2IiKFc= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52 h1:JG/0uqcGdTNgq7FdU+61l5Pdmb8putNZlXb65bJBROs= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099 h1:XJP7lxbSxWLOMNdBE4B/STaqVy6L73o0knwj2vIlxnw= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858 h1:wN+eVZ7U+gqdqkec6C6VXR1OFf9a5Ul9ETzeYsYv20g= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 51bb9457cd..1fabb11e1b 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ 
b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,7 +18,7 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in transport package only go to verbose level 2. // All logs in other packages in grpc are logged in spite of the verbosity level. // // In the default logger, diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go deleted file mode 100644 index 3a905d9665..0000000000 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package balancerload defines APIs to parse server loads in trailers. The -// parsed loads are sent to balancers in DoneInfo. -package balancerload - -import ( - "google.golang.org/grpc/metadata" -) - -// Parser converts loads from metadata into a concrete type. -type Parser interface { - // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} -} - -var parser Parser - -// SetParser sets the load parser. -// -// Not mutex-protected, should be called before any gRPC functions. -func SetParser(lr Parser) { - parser = lr -} - -// Parse calls parser.Read(). 
-func Parse(md metadata.MD) interface{} { - if parser == nil { - return nil - } - return parser.Parse(md) -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index 4cc2525df7..eb188eae5a 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -180,7 +180,7 @@ func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err if s := match[1]; s != "" { msgLenStr, err = strconv.ParseUint(s, 10, 64) if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + return 0, 0, fmt.Errorf("Failed to convert %q to uint", s) } return 0, msgLenStr, nil } @@ -195,13 +195,13 @@ func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err if s := match[1]; s != "" { hdrLenStr, err = strconv.ParseUint(s, 10, 64) if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + return 0, 0, fmt.Errorf("Failed to convert %q to uint", s) } } if s := match[2]; s != "" { msgLenStr, err = strconv.ParseUint(s, 10, 64) if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + return 0, 0, fmt.Errorf("Failed to convert %q to uint", s) } } return hdrLenStr, msgLenStr, nil diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 160f6e8616..b06cdd4d43 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -377,7 +377,10 @@ func metadataKeyOmit(key string) bool { case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. return false } - return strings.HasPrefix(key, "grpc-") + if strings.HasPrefix(key, "grpc-") { + return true + } + return false } func mdToMetadataProto(md metadata.MD) *pb.Metadata { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 11be7cd08c..d2193b3a21 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -34,9 +34,13 @@ const ( type RequireHandshakeSetting int const ( - // RequireHandshakeOn indicates to wait for handshake before considering a - // connection ready/successful. - RequireHandshakeOn RequireHandshakeSetting = iota + // RequireHandshakeHybrid (default, deprecated) indicates to not wait for + // handshake before considering a connection ready, but wait before + // considering successful. + RequireHandshakeHybrid RequireHandshakeSetting = iota + // RequireHandshakeOn (default after the 1.17 release) indicates to wait + // for handshake before considering a connection ready/successful. + RequireHandshakeOn // RequireHandshakeOff indicates to not wait for handshake before // considering a connection ready/successful. RequireHandshakeOff @@ -49,16 +53,18 @@ var ( // environment variable. // // Will be removed after the 1.18 release. - RequireHandshake = RequireHandshakeOn + RequireHandshake RequireHandshakeSetting ) func init() { switch strings.ToLower(os.Getenv(requireHandshakeStr)) { case "on": - fallthrough default: RequireHandshake = RequireHandshakeOn case "off": RequireHandshake = RequireHandshakeOff + case "hybrid": + // Will be removed after the 1.17 release. 
+ RequireHandshake = RequireHandshakeHybrid } } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index c1d2c690ca..eaa54d4fc6 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -20,12 +20,11 @@ // symbols to avoid circular dependencies. package internal -import ( - "context" - "time" -) +import "context" var ( + // WithContextDialer is exported by dialoptions.go + WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption // WithResolverBuilder is exported by dialoptions.go WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption // WithHealthCheckFunc is not exported by dialoptions.go @@ -34,9 +33,6 @@ var ( HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. BalancerUnregister func(name string) - // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by - // default, but tests may wish to set it lower for convenience. - KeepaliveMinPingTime = 10 * time.Second ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index d3fd9dab33..61678feb00 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -22,24 +22,18 @@ package syscall import ( "net" - "sync" "time" "google.golang.org/grpc/grpclog" ) -var once sync.Once - -func log() { - once.Do(func() { - grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") - }) +func init() { + grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") } // GetCPUTime returns the how much CPU time has passed since the start of this process. // It always returns 0 under non-linux or appengine environment. func GetCPUTime() int64 { - log() return 0 } @@ -48,26 +42,22 @@ type Rusage struct{} // GetRusage is a no-op function under non-linux or appengine environment. func GetRusage() (rusage *Rusage) { - log() return nil } // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It a no-op function for non-linux or appengine environment. 
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux or appengine environments func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { - log() return nil } // GetTCPUserTimeout is a no-op function under non-linux or appengine environments // a negative return value indicates the operation is not supported func GetTCPUserTimeout(conn net.Conn) (int, error) { - log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index f2de84d43a..73b41ea7e0 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -63,6 +63,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta if _, ok := w.(http.Flusher); !ok { return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") } + if _, ok := w.(http.CloseNotifier); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") + } st := &serverHandlerTransport{ rw: w, @@ -173,11 +176,17 @@ func (a strAddr) String() string { return string(a) } // do runs fn in the ServeHTTP goroutine. func (ht *serverHandlerTransport) do(fn func()) error { + // Avoid a panic writing to closed channel. Imperfect but maybe good enough. select { case <-ht.closedCh: return ErrConnClosing - case ht.writes <- fn: - return nil + default: + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } } } @@ -228,6 +237,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro if ht.stats != nil { ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } + close(ht.writes) } ht.Close() return err @@ -305,13 +315,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace ctx, cancel = context.WithCancel(ctx) } - // requestOver is closed when the status has been written via WriteStatus. + // requestOver is closed when either the request's context is done + // or the status has been written via WriteStatus. requestOver := make(chan struct{}) + + // clientGone receives a single value if peer is gone, either + // because the underlying connection is dead or because the + // peer sends an http2 RST_STREAM. 
+ clientGone := ht.rw.(http.CloseNotifier).CloseNotify() go func() { select { case <-requestOver: case <-ht.closedCh: - case <-ht.req.Context().Done(): + case <-clientGone: } cancel() ht.Close() @@ -391,7 +407,10 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace func (ht *serverHandlerTransport) runStream() { for { select { - case fn := <-ht.writes: + case fn, ok := <-ht.writes: + if !ok { + return + } fn() case <-ht.closedCh: return diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 9dee6db61d..babcaee501 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -322,9 +322,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } } - if err := t.framer.writer.Flush(); err != nil { - return nil, err - } + t.framer.writer.Flush() go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() @@ -419,7 +417,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. - timeout := time.Until(dl) + timeout := dl.Sub(time.Now()) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) } for k, v := range authData { @@ -1140,27 +1138,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if !ok { return } - endStream := frame.StreamEnded() atomic.StoreUint32(&s.bytesReceived, 1) - initialHeader := atomic.SwapUint32(&s.headerDone, 1) == 0 - - if !initialHeader && !endStream { - // As specified by RFC 7540, a HEADERS frame (and associated CONTINUATION frames) can only appear - // at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. - st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") - t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) - return - } - - state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC ResponseHeader has been received - // which indicates peer speaking gRPC, we are in gRPC mode. - state.data.isGRPC = !initialHeader + var state decodeState if err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) + t.closeStream(s, err, true, http2.ErrCodeProtocol, status.New(codes.Internal, err.Error()), nil, false) + // Something wrong. Stops reading even when there is remaining. return } + endStream := frame.StreamEnded() var isHeader bool defer func() { if t.statsHandler != nil { @@ -1179,30 +1165,29 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } }() - // If headers haven't been received yet. - if initialHeader { + if atomic.SwapUint32(&s.headerDone, 1) == 0 { if !endStream { - // Headers frame is ResponseHeader. + // Headers frame is not actually a trailers-only frame. isHeader = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. 
- s.recvCompress = state.data.encoding - if len(state.data.mdata) > 0 { - s.header = state.data.mdata + s.recvCompress = state.encoding + if len(state.mdata) > 0 { + s.header = state.mdata } - close(s.headerChan) - return + } else { + s.noHeaders = true } - // Headers frame is Trailers-only. - s.noHeaders = true close(s.headerChan) } - + if !endStream { + return + } // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.mdata, true) } // reader runs as a separate goroutine in charge of reading data from network @@ -1369,8 +1354,6 @@ func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { return &s } -func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } - func (t *http2Client) IncrMsgSent() { atomic.AddInt64(&t.czData.msgSent, 1) atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 435092e5c8..df2740398b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -286,9 +286,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // operateHeader takes action on the decoded headers. func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { streamID := frame.Header().StreamID - state := &decodeState{ - serverSide: true, - } + state := decodeState{serverSide: true} if err := state.decodeHeader(frame); err != nil { if se, ok := status.FromError(err); ok { t.controlBuf.put(&cleanupStream{ @@ -307,16 +305,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( st: t, buf: buf, fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.data.encoding, - method: state.data.method, - contentSubtype: state.data.contentSubtype, + recvCompress: state.encoding, + method: state.method, + contentSubtype: state.contentSubtype, } if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - if state.data.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } @@ -329,19 +327,19 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx = peer.NewContext(s.ctx, pr) // Attach the received metadata to the context. 
- if len(state.data.mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) + if len(state.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) } - if state.data.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) + if state.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) } - if state.data.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + if state.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) } if t.inTapHandle != nil { var err error info := &tap.Info{ - FullMethodName: state.data.method, + FullMethodName: state.method, } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { @@ -437,7 +435,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. s := t.activeStreams[se.StreamID] t.mu.Unlock() if s != nil { - t.closeStream(s, true, se.Code, false) + t.closeStream(s, true, se.Code, nil, false) } else { t.controlBuf.put(&cleanupStream{ streamID: se.StreamID, @@ -579,7 +577,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { } if size > 0 { if err := s.fc.onData(size); err != nil { - t.closeStream(s, true, http2.ErrCodeFlowControl, false) + t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false) return } if f.Header().Flags.Has(http2.FlagDataPadded) { @@ -604,18 +602,11 @@ func (t *http2Server) handleData(f *http2.DataFrame) { } func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { - // If the stream is not deleted from the transport's active streams map, then do a regular close stream. - if s, ok := t.getStream(f); ok { - t.closeStream(s, false, 0, false) + s, ok := t.getStream(f) + if !ok { return } - // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. - t.controlBuf.put(&cleanupStream{ - streamID: f.Header().StreamID, - rst: false, - rstCode: 0, - onWrite: func() {}, - }) + t.closeStream(s, false, 0, nil, false) } func (t *http2Server) handleSettings(f *http2.SettingsFrame) { @@ -779,7 +770,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { if err != nil { return err } - t.closeStream(s, true, http2.ErrCodeInternal, false) + t.closeStream(s, true, http2.ErrCodeInternal, nil, false) return ErrHeaderListSizeLimitViolation } if t.stats != nil { @@ -843,12 +834,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { if err != nil { return err } - t.closeStream(s, true, http2.ErrCodeInternal, false) + t.closeStream(s, true, http2.ErrCodeInternal, nil, false) return ErrHeaderListSizeLimitViolation } - // Send a RST_STREAM after the trailers if the client has not already half-closed. - rst := s.getState() == streamActive - t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) + t.closeStream(s, false, 0, trailingHeader, true) if t.stats != nil { t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } @@ -860,9 +849,6 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } // TODO(mmukhi, dfawley): Make sure this is the right code to return. 
return status.Errorf(codes.Internal, "transport: %v", err) } @@ -1018,65 +1004,45 @@ func (t *http2Server) Close() error { return err } -// deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState streamState) { - oldState = s.swapState(streamDone) - if oldState == streamDone { +// closeStream clears the footprint of a stream when the stream is not needed +// any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + if s.swapState(streamDone) == streamDone { // If the stream was already done, return. - return oldState + return } - // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. s.cancel() - - t.mu.Lock() - if _, ok := t.activeStreams[s.id]; ok { - delete(t.activeStreams, s.id) - if len(t.activeStreams) == 0 { - t.idle = time.Now() - } - } - t.mu.Unlock() - - if channelz.IsOn() { - if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) - } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) - } - } - - return oldState -} - -// finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { - oldState := t.deleteStream(s, eosReceived) - // If the stream is already closed, then don't put trailing header to controlbuf. - if oldState == streamDone { - return - } - - hdr.cleanup = &cleanupStream{ + cleanup := &cleanupStream{ streamID: s.id, rst: rst, rstCode: rstCode, - onWrite: func() {}, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + } + if hdr != nil { + hdr.cleanup = cleanup + t.controlBuf.put(hdr) + } else { + t.controlBuf.put(cleanup) } - t.controlBuf.put(hdr) -} - -// closeStream clears the footprint of a stream when the stream is not needed any more. -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { - t.deleteStream(s, eosReceived) - t.controlBuf.put(&cleanupStream{ - streamID: s.id, - rst: rst, - rstCode: rstCode, - onWrite: func() {}, - }) } func (t *http2Server) RemoteAddr() net.Addr { @@ -1189,7 +1155,7 @@ func (t *http2Server) IncrMsgRecv() { } func (t *http2Server) getOutFlowWindow() int64 { - resp := make(chan uint32, 1) + resp := make(chan uint32) timer := time.NewTimer(time.Second) defer timer.Stop() t.controlBuf.put(&outFlowControlSizeRequest{resp}) diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 9d212867ce..77a2cfaaef 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -78,8 +78,7 @@ var ( codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, codes.PermissionDenied: http2.ErrCodeInadequateSecurity, } - // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. - HTTPStatusConvTab = map[int]codes.Code{ + httpStatusConvTab = map[int]codes.Code{ // 400 Bad Request - INTERNAL. 
http.StatusBadRequest: codes.Internal, // 401 Unauthorized - UNAUTHENTICATED. @@ -99,7 +98,9 @@ var ( } ) -type parsedHeaderData struct { +// Records the states during HPACK decoding. Must be reset once the +// decoding of the entire headers are finished. +type decodeState struct { encoding string // statusGen caches the stream status received from the trailer the server // sent. Client side only. Do not access directly. After all trailers are @@ -119,30 +120,8 @@ type parsedHeaderData struct { statsTags []byte statsTrace []byte contentSubtype string - - // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). - // - // We are in gRPC mode (peer speaking gRPC) if: - // * We are client side and have already received a HEADER frame that indicates gRPC peer. - // * The header contains valid a content-type, i.e. a string starts with "application/grpc" - // And we should handle error specific to gRPC. - // - // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we - // are in HTTP fallback mode, and should handle error specific to HTTP. - isGRPC bool - grpcErr error - httpErr error - contentTypeErr string -} - -// decodeState configures decoding criteria and records the decoded data. -type decodeState struct { // whether decoding on server side or not serverSide bool - - // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS - // frame once decodeHeader function has been invoked and returned. - data parsedHeaderData } // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -223,11 +202,11 @@ func contentType(contentSubtype string) string { } func (d *decodeState) status() *status.Status { - if d.data.statusGen == nil { + if d.statusGen == nil { // No status-details were provided; generate status using code/msg. - d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) + d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) } - return d.data.statusGen + return d.statusGen } const binHdrSuffix = "-bin" @@ -265,146 +244,113 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { if frame.Truncated { return status.Error(codes.Internal, "peer header list size exceeded limit") } - for _, hf := range frame.Fields { - d.processHeaderField(hf) + if err := d.processHeaderField(hf); err != nil { + return err + } } - if d.data.isGRPC { - if d.data.grpcErr != nil { - return d.data.grpcErr - } - if d.serverSide { - return nil - } - if d.data.rawStatusCode == nil && d.data.statusGen == nil { - // gRPC status doesn't exist. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propagated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. - code := int(codes.Unknown) - d.data.rawStatusCode = &code - } + if d.serverSide { return nil } - // HTTP fallback mode - if d.data.httpErr != nil { - return d.data.httpErr + // If grpc status exists, no need to check further. + if d.rawStatusCode != nil || d.statusGen != nil { + return nil } - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - ok bool - ) + // If grpc status doesn't exist and http status doesn't exist, + // then it's a malformed header. 
+ if d.httpStatus == nil { + return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") + } - if d.data.httpStatus != nil { - code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] + if *(d.httpStatus) != http.StatusOK { + code, ok := httpStatusConvTab[*(d.httpStatus)] if !ok { code = codes.Unknown } - } - - return status.Error(code, d.constructHTTPErrMsg()) -} - -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. -func (d *decodeState) constructHTTPErrMsg() string { - var errMsgs []string - - if d.data.httpStatus == nil { - errMsgs = append(errMsgs, "malformed header: missing HTTP status") - } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) - } - - if d.data.contentTypeErr == "" { - errMsgs = append(errMsgs, "transport: missing content-type field") - } else { - errMsgs = append(errMsgs, d.data.contentTypeErr) - } - - return strings.Join(errMsgs, "; ") + return status.Error(code, http.StatusText(*(d.httpStatus))) + } + + // gRPC status doesn't exist and http status is OK. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propagated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propagated. + code := int(codes.Unknown) + d.rawStatusCode = &code + return nil } func (d *decodeState) addMetadata(k, v string) { - if d.data.mdata == nil { - d.data.mdata = make(map[string][]string) + if d.mdata == nil { + d.mdata = make(map[string][]string) } - d.data.mdata[k] = append(d.data.mdata[k], v) + d.mdata[k] = append(d.mdata[k], v) } -func (d *decodeState) processHeaderField(f hpack.HeaderField) { +func (d *decodeState) processHeaderField(f hpack.HeaderField) error { switch f.Name { case "content-type": contentSubtype, validContentType := contentSubtype(f.Value) if !validContentType { - d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) - return + return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value) } - d.data.contentSubtype = contentSubtype + d.contentSubtype = contentSubtype // TODO: do we want to propagate the whole content-type in the metadata, // or come up with a way to just propagate the content-subtype if it was set? // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} // in the metadata? 
d.addMetadata(f.Name, f.Value) - d.data.isGRPC = true case "grpc-encoding": - d.data.encoding = f.Value + d.encoding = f.Value case "grpc-status": code, err := strconv.Atoi(f.Value) if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return + return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) } - d.data.rawStatusCode = &code + d.rawStatusCode = &code case "grpc-message": - d.data.rawStatusMsg = decodeGrpcMessage(f.Value) + d.rawStatusMsg = decodeGrpcMessage(f.Value) case "grpc-status-details-bin": v, err := decodeBinHeader(f.Value) if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return + return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) } s := &spb.Status{} if err := proto.Unmarshal(v, s); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return + return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) } - d.data.statusGen = status.FromProto(s) + d.statusGen = status.FromProto(s) case "grpc-timeout": - d.data.timeoutSet = true + d.timeoutSet = true var err error - if d.data.timeout, err = decodeTimeout(f.Value); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) + if d.timeout, err = decodeTimeout(f.Value); err != nil { + return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) } case ":path": - d.data.method = f.Value + d.method = f.Value case ":status": code, err := strconv.Atoi(f.Value) if err != nil { - d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - return + return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) } - d.data.httpStatus = &code + d.httpStatus = &code case "grpc-tags-bin": v, err := decodeBinHeader(f.Value) if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - return + return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) } - d.data.statsTags = v + d.statsTags = v d.addMetadata(f.Name, string(v)) case "grpc-trace-bin": v, err := decodeBinHeader(f.Value) if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - return + return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) } - d.data.statsTrace = v + d.statsTrace = v d.addMetadata(f.Name, string(v)) default: if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { @@ -413,10 +359,11 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { v, err := decodeMetadataHeader(f.Name, f.Value) if err != nil { errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) - return + return nil } d.addMetadata(f.Name, v) } + return nil } type timeoutUnit uint8 diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 7f82cbb080..2580aa7d3b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -327,7 +327,8 @@ func (s *Stream) TrailersOnly() (bool, error) { if err != nil { return false, err } - return s.noHeaders, nil + // if !headerDone, some other connection error occurred. 
+ return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil } // Trailer returns the cached trailer metedata. Note that if it is not called @@ -610,9 +611,6 @@ type ClientTransport interface { // GetGoAwayReason returns the reason why GoAway frame was received. GetGoAwayReason() GoAwayReason - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr - // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7d..899e72d7a4 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -33,7 +33,6 @@ import ( type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. - // If set below 10s, a minimum value of 10s will be used instead. Time time.Duration // The current default value is infinity. // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is @@ -63,7 +62,6 @@ type ServerParameters struct { MaxConnectionAgeGrace time.Duration // The current default value is infinity. // After a duration of this time if the server doesn't see any activity it // pings the client to see if the transport is still alive. - // If set below 1s, a minimum value of 1s will be used instead. Time time.Duration // The current default value is 2 hours. // After having pinged for keepalive check, the server waits for a duration // of Timeout and if no activity is seen even after that the connection is diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go index c9f79dc533..fd8cd3b5a4 100644 --- a/vendor/google.golang.org/grpc/naming/dns_resolver.go +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -76,8 +76,8 @@ func formatIP(addr string) (addrIP string, ok bool) { // parseTarget takes the user input target string, returns formatted host and port info. // If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets +// are strippd when setting the host. // examples: // target: "www.google.com" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" returns host: "ipv4-host", port: "80" @@ -221,7 +221,7 @@ func (w *dnsWatcher) lookupSRV() map[string]*Update { for _, s := range srvs { lbAddrs, err := lookupHost(w.ctx, s.Target) if err != nil { - grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) + grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err) continue } for _, a := range lbAddrs { diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go index c99fdbef43..8cc39e9375 100644 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -17,7 +17,7 @@ */ // Package naming defines the naming API and related data structures for gRPC. -// The interface is EXPERIMENTAL and may be subject to change. +// The interface is EXPERIMENTAL and may be suject to change. 
// // Deprecated: please use package resolver. package naming diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index f9625496c4..14f915d676 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -101,7 +101,10 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { - var ch chan struct{} + var ( + p balancer.Picker + ch chan struct{} + ) for { bp.mu.Lock() @@ -127,7 +130,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. } ch = bp.blockingCh - p := bp.picker + p = bp.picker bp.mu.Unlock() subConn, done, err := p.Pick(ctx, opts) @@ -141,22 +144,15 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. continue } return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError()) - case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return nil, nil, status.Error(codes.Canceled, err.Error()) default: - if _, ok := status.FromError(err); ok { - return nil, nil, err - } // err is some other error. - return nil, nil, status.Error(codes.Unknown, err.Error()) + return nil, nil, toRPCErr(err) } } acw, ok := subConn.(*acBalancerWrapper) if !ok { - grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + grpclog.Infof("subconn returned from pick is not *acBalancerWrapper") continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { @@ -165,11 +161,6 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. } return t, done, nil } - if done != nil { - // Calling done with nil error, no bytes sent and no bytes received. - // DoneInfo with default value works. - done(balancer.DoneInfo{}) - } grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. // A valid picker always returns READY subConn. This means the state of ac diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index 5835599077..f33189fede 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -47,8 +47,6 @@ const ( defaultFreq = time.Minute * 30 defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. - txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. 
txtAttribute = "grpc_config=" @@ -284,7 +282,7 @@ func (d *dnsResolver) lookupSRV() []resolver.Address { } func (d *dnsResolver) lookupTXT() string { - ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + ss, err := d.resolver.LookupTXT(d.ctx, d.host) if err != nil { grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) return "" @@ -348,8 +346,8 @@ func formatIP(addr string) (addrIP string, ok bool) { // parseTarget takes the user input target string and default port, returns formatted host and port info. // If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets +// are strippd when setting the host. // examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go index 893d5d12cb..b76010d74d 100644 --- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -45,7 +45,7 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) + r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 52ec603daa..145cf477ed 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -98,15 +98,6 @@ type BuildOption struct { DisableServiceConfig bool } -// State contains the current Resolver state relevant to the ClientConn. -type State struct { - Addresses []Address // Resolved addresses for the target - ServiceConfig string // JSON representation of the service config - - // TODO: add Err error - // TODO: add ParsedServiceConfig interface{} -} - // ClientConn contains the callbacks for resolver to notify any updates // to the gRPC ClientConn. // @@ -115,18 +106,12 @@ type State struct { // testing, the new implementation should embed this interface. This allows // gRPC to add new methods to this interface. type ClientConn interface { - // UpdateState updates the state of the ClientConn appropriately. - UpdateState(State) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. // The address list should be the complete list of resolved addresses. - // - // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) // NewServiceConfig is called by resolver to notify ClientConn a new // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. 
NewServiceConfig(serviceConfig string) } diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index e9cef3a92b..50991eafb9 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -21,7 +21,6 @@ package grpc import ( "fmt" "strings" - "sync/atomic" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" @@ -31,12 +30,12 @@ import ( // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConnection interface. type ccResolverWrapper struct { - cc *ClientConn - resolver resolver.Resolver - addrCh chan []resolver.Address - scCh chan string - done uint32 // accessed atomically; set to 1 when closed. - curState resolver.State + cc *ClientConn + resolver resolver.Resolver + addrCh chan []resolver.Address + scCh chan string + done chan struct{} + lastAddressesCount int } // split2 returns the values from strings.SplitN(s, sep, 2). @@ -83,6 +82,7 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { cc: cc, addrCh: make(chan []resolver.Address, 1), scCh: make(chan string, 1), + done: make(chan struct{}), } var err error @@ -99,67 +99,57 @@ func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) { func (ccr *ccResolverWrapper) close() { ccr.resolver.Close() - atomic.StoreUint32(&ccr.done, 1) + close(ccr.done) } -func (ccr *ccResolverWrapper) isDone() bool { - return atomic.LoadUint32(&ccr.done) == 1 -} - -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { - if ccr.isDone() { - return - } - grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } - ccr.cc.updateResolverState(s) - ccr.curState = s -} - -// NewAddress is called by the resolver implementation to send addresses to gRPC. +// NewAddress is called by the resolver implemenetion to send addresses to gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - if ccr.isDone() { + select { + case <-ccr.done: return + default: } grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.addChannelzTraceEvent(addrs) } - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState) + ccr.cc.handleResolvedAddrs(addrs, nil) } -// NewServiceConfig is called by the resolver implementation to send service +// NewServiceConfig is called by the resolver implemenetion to send service // configs to gRPC. 
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - if ccr.isDone() { + select { + case <-ccr.done: return + default: } grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: sc}) - } - ccr.curState.ServiceConfig = sc - ccr.cc.updateResolverState(ccr.curState) + ccr.cc.handleServiceConfig(sc) } -func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - if s.ServiceConfig == ccr.curState.ServiceConfig && (len(ccr.curState.Addresses) == 0) == (len(s.Addresses) == 0) { - return - } - var updates []string - if s.ServiceConfig != ccr.curState.ServiceConfig { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") +func (ccr *ccResolverWrapper) addChannelzTraceEvent(addrs []resolver.Address) { + if len(addrs) == 0 && ccr.lastAddressesCount != 0 { + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Resolver returns an empty address list", + Severity: channelz.CtWarning, + }) + } else if len(addrs) != 0 && ccr.lastAddressesCount == 0 { + var s string + for i, a := range addrs { + if a.ServerName != "" { + s += a.Addr + "(" + a.ServerName + ")" + } else { + s += a.Addr + } + if i != len(addrs)-1 { + s += " " + } + } + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver returns a non-empty address list (previous one was empty) %q", s), + Severity: channelz.CtINFO, + }) } - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtINFO, - }) + ccr.lastAddressesCount = len(addrs) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2a595622d3..8d0d3dc8c9 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -370,13 +370,13 @@ func (o CompressorCallOption) after(c *callInfo) {} // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for // more details. // -// If ForceCodec is not also used, the content-subtype will be used to look up -// the Codec to use in the registry controlled by RegisterCodec. See the -// documentation on RegisterCodec for details on registration. The lookup of -// content-subtype is case-insensitive. If no such Codec is found, the call +// If CallCustomCodec is not also used, the content-subtype will be used to +// look up the Codec to use in the registry controlled by RegisterCodec. See +// the documentation on RegisterCodec for details on registration. The lookup +// of content-subtype is case-insensitive. If no such Codec is found, the call // will result in an error with code codes.Internal. // -// If ForceCodec is also used, that Codec will be used for all request and +// If CallCustomCodec is also used, that Codec will be used for all request and // response messages, with the content-subtype set to the given contentSubtype // here for requests. 
func CallContentSubtype(contentSubtype string) CallOption { @@ -396,7 +396,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { } func (o ContentSubtypeCallOption) after(c *callInfo) {} -// ForceCodec returns a CallOption that will set the given Codec to be +// CallCustomCodec returns a CallOption that will set the given Codec to be // used for all request and response messages for a call. The result of calling // String() will be used as the content-subtype in a case-insensitive manner. // @@ -408,37 +408,12 @@ func (o ContentSubtypeCallOption) after(c *callInfo) {} // // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. -// -// This is an EXPERIMENTAL API. -func ForceCodec(codec encoding.Codec) CallOption { - return ForceCodecCallOption{Codec: codec} -} - -// ForceCodecCallOption is a CallOption that indicates the codec used for -// marshaling messages. -// -// This is an EXPERIMENTAL API. -type ForceCodecCallOption struct { - Codec encoding.Codec -} - -func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec - return nil -} -func (o ForceCodecCallOption) after(c *callInfo) {} - -// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of -// an encoding.Codec. -// -// Deprecated: use ForceCodec instead. func CallCustomCodec(codec Codec) CallOption { return CustomCodecCallOption{Codec: codec} } // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. -// // This is an EXPERIMENTAL API. type CustomCodecCallOption struct { Codec Codec @@ -661,9 +636,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = ioutil.ReadAll(dcReader) if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8115828fdf..d705d7a80c 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -182,11 +182,6 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { - grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second - } - return func(o *options) { o.keepaliveParams = kp } @@ -249,7 +244,7 @@ func MaxRecvMsgSize(m int) ServerOption { } // MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. -// If this is not set, gRPC uses the default `math.MaxInt32`. +// If this is not set, gRPC uses the default 4MB. 
func MaxSendMsgSize(m int) ServerOption { return func(o *options) { o.maxSendMessageSize = m @@ -614,13 +609,12 @@ func (s *Server) handleRawConn(rawConn net.Conn) { rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) conn, authInfo, err := s.useTransportAuthenticator(rawConn) if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // If serverHandshake returns ErrConnDispatched, keep rawConn open. if err != credentials.ErrConnDispatched { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) rawConn.Close() } rawConn.SetDeadline(time.Time{}) @@ -749,13 +743,12 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea trInfo = &traceInfo{ tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) + trInfo.firstLine.deadline = dl.Sub(time.Now()) } return trInfo } @@ -861,6 +854,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } if trInfo != nil { defer trInfo.tr.Finish() + trInfo.firstLine.client = false trInfo.tr.LazyLog(&trInfo.firstLine, false) defer func() { if err != nil && err != io.EOF { @@ -880,7 +874,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
PeerAddr: nil, } if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = time.Until(deadline) + logEntry.Timeout = deadline.Sub(time.Now()) if logEntry.Timeout < 0 { logEntry.Timeout = 0 } @@ -1112,7 +1106,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp PeerAddr: nil, } if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = time.Until(deadline) + logEntry.Timeout = deadline.Sub(time.Now()) if logEntry.Timeout < 0 { logEntry.Timeout = 0 } @@ -1246,8 +1240,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - srv, knownService := s.m[service] - if knownService { + if srv, ok := s.m[service]; ok { if md, ok := srv.md[method]; ok { s.processUnaryRPC(t, stream, srv, md, trInfo) return @@ -1262,16 +1255,11 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) return } - var errDesc string - if !knownService { - errDesc = fmt.Sprintf("unknown service %v", service) - } else { - errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) - } if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) trInfo.tr.SetError() } + errDesc := fmt.Sprintf("unknown service %v", service) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 1c5227426f..162857e204 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -99,9 +99,6 @@ type ServiceConfig struct { // healthCheckConfig must be set as one of the requirement to enable LB channel // health check. healthCheckConfig *healthCheckConfig - // rawJSONString stores service config json string that get parsed into - // this service config struct. - rawJSONString string } // healthCheckConfig defines the go-native version of the LB channel health check config. 
@@ -241,22 +238,24 @@ type jsonSC struct { HealthCheckConfig *healthCheckConfig } -func parseServiceConfig(js string) (*ServiceConfig, error) { +func parseServiceConfig(js string) (ServiceConfig, error) { + if len(js) == 0 { + return ServiceConfig{}, fmt.Errorf("no JSON service config provided") + } var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + return ServiceConfig{}, err } sc := ServiceConfig{ LB: rsc.LoadBalancingPolicy, Methods: make(map[string]MethodConfig), retryThrottling: rsc.RetryThrottling, healthCheckConfig: rsc.HealthCheckConfig, - rawJSONString: js, } if rsc.MethodConfig == nil { - return &sc, nil + return sc, nil } for _, m := range *rsc.MethodConfig { @@ -266,7 +265,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { d, err := parseDuration(m.Timeout) if err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + return ServiceConfig{}, err } mc := MethodConfig{ @@ -275,7 +274,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { } if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + return ServiceConfig{}, err } if m.MaxRequestMessageBytes != nil { if *m.MaxRequestMessageBytes > int64(maxInt) { @@ -300,13 +299,13 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { if sc.retryThrottling != nil { if sc.retryThrottling.MaxTokens <= 0 || - sc.retryThrottling.MaxTokens > 1000 || + sc.retryThrottling.MaxTokens >= 1000 || sc.retryThrottling.TokenRatio <= 0 { // Illegal throttling config; disable throttling. sc.retryThrottling = nil } } - return &sc, nil + return sc, nil } func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index f3f593c844..84f77dafa5 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -27,8 +27,6 @@ import ( "context" "net" "time" - - "google.golang.org/grpc/metadata" ) // RPCStats contains stats information about RPCs. @@ -174,9 +172,6 @@ type End struct { BeginTime time.Time // EndTime is the time when the RPC ends. EndTime time.Time - // Trailer contains the trailer metadata received from the server. This - // field is only valid if this End is from the client side. - Trailer metadata.MD // Error is the error the RPC ended with. It is an error generated from // status.Status and can be converted back to status.Status using // status.FromError if non-nil. 
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 6e2bf51e0a..d06279a209 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/encoding" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" @@ -231,16 +230,12 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if c.creds != nil { callHdr.Creds = c.creds } - var trInfo *traceInfo + var trInfo traceInfo if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } + trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + trInfo.firstLine.client = true if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) + trInfo.firstLine.deadline = deadline.Sub(time.Now()) } trInfo.tr.LazyLog(&trInfo.firstLine, false) ctx = trace.NewContext(ctx, trInfo.tr) @@ -302,7 +297,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth Authority: cs.cc.authority, } if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = time.Until(deadline) + logEntry.Timeout = deadline.Sub(time.Now()) if logEntry.Timeout < 0 { logEntry.Timeout = 0 } @@ -328,7 +323,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cs, nil } -func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) error { +func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) error { cs.attempt = &csAttempt{ cs: cs, dc: cs.cc.dopts.dc, @@ -343,9 +338,6 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) er if err != nil { return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) - } cs.attempt.t = t cs.attempt.done = done return nil @@ -422,10 +414,9 @@ type csAttempt struct { decompSet bool mu sync.Mutex // guards trInfo.tr - // trInfo may be nil (if EnableTracing is false). // trInfo.tr is set when created (if EnableTracing is true), // and cleared when the finish method is called. - trInfo *traceInfo + trInfo traceInfo statsHandler stats.Handler } @@ -549,7 +540,7 @@ func (cs *clientStream) retryLocked(lastErr error) error { cs.commitAttemptLocked() return err } - if err := cs.newAttemptLocked(nil, nil); err != nil { + if err := cs.newAttemptLocked(nil, traceInfo{}); err != nil { return err } if lastErr = cs.replayBufferLocked(); lastErr == nil { @@ -820,7 +811,7 @@ func (cs *clientStream) finish(err error) { func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { cs := a.cs - if a.trInfo != nil { + if EnableTracing { a.mu.Lock() if a.trInfo.tr != nil { a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) @@ -877,7 +868,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return toRPCErr(err) } - if a.trInfo != nil { + if EnableTracing { a.mu.Lock() if a.trInfo.tr != nil { a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) @@ -890,9 +881,8 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. 
- Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -925,23 +915,22 @@ func (a *csAttempt) finish(err error) { // Ending a stream with EOF indicates a success. err = nil } - var tr metadata.MD if a.s != nil { a.t.CloseStream(a.s, err) - tr = a.s.Trailer() } if a.done != nil { br := false + var tr metadata.MD if a.s != nil { br = a.s.BytesReceived() + tr = a.s.Trailer() } a.done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, BytesReceived: br, - ServerLoad: balancerload.Parse(tr), }) } if a.statsHandler != nil { @@ -949,12 +938,11 @@ func (a *csAttempt) finish(err error) { Client: true, BeginTime: a.cs.beginTime, EndTime: time.Now(), - Trailer: tr, Error: err, } a.statsHandler.HandleRPC(a.cs.ctx, end) } - if a.trInfo != nil && a.trInfo.tr != nil { + if a.trInfo.tr != nil { if err == nil { a.trInfo.tr.LazyPrintf("RPC: [OK]") } else { @@ -1098,6 +1086,7 @@ type addrConnStream struct { dc Decompressor decomp encoding.Compressor p *parser + done func(balancer.DoneInfo) mu sync.Mutex finished bool } @@ -1478,9 +1467,8 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), }) } if ss.binlog != nil { diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 0a57b99948..c1c96dedcb 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -24,7 +24,6 @@ import ( "io" "net" "strings" - "sync" "time" "golang.org/x/net/trace" @@ -54,25 +53,13 @@ type traceInfo struct { } // firstLine is the first line of an RPC trace. -// It may be mutated after construction; remoteAddr specifically may change -// during client-side use. type firstLine struct { - mu sync.Mutex client bool // whether this is a client (outgoing) RPC remoteAddr net.Addr deadline time.Duration // may be zero } -func (f *firstLine) SetRemoteAddr(addr net.Addr) { - f.mu.Lock() - f.remoteAddr = addr - f.mu.Unlock() -} - func (f *firstLine) String() string { - f.mu.Lock() - defer f.mu.Unlock() - var line bytes.Buffer io.WriteString(&line, "RPC: ") if f.client { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 092e088258..45eace590f 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.20.1" +const Version = "1.18.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bd5c8de90c..94a50640b5 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -13,19 +13,26 @@ die() { exit 1 } -fail_on_output() { - tee /dev/stderr | (! read) -} - # Check to make sure it's safe to modify the user's git repo. -git status --porcelain | fail_on_output +if git status --porcelain | read; then + die "Uncommitted or untracked files found; commit changes first" +fi + +if [[ -d "${GOPATH}/src" ]]; then + die "\${GOPATH}/src (${GOPATH}/src) exists; this script will delete it." +fi # Undo any edits made by this script. 
cleanup() { + rm -rf "${GOPATH}/src" git reset --hard HEAD } trap cleanup EXIT +fail_on_output() { + tee /dev/stderr | (! read) +} + PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" if [[ "$1" = "-install" ]]; then @@ -86,7 +93,7 @@ go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go # - gofmt, goimports, golint (with exceptions for generated code), go vet. gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") | fail_on_output +goimports -l . 2>&1 | fail_on_output golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") go tool vet -all . @@ -105,25 +112,30 @@ if go help mod >& /dev/null; then fi # - Collection of static analysis checks +### HACK HACK HACK: Remove once staticcheck works with modules. +# Make a symlink in ${GOPATH}/src to its ${GOPATH}/pkg/mod equivalent for every package we use. +for x in $(find "${GOPATH}/pkg/mod" -name '*@*' | grep -v \/mod\/cache\/); do + pkg="$(echo ${x#"${GOPATH}/pkg/mod/"} | cut -f1 -d@)"; + # If multiple versions exist, just use the existing one. + if [[ -L "${GOPATH}/src/${pkg}" ]]; then continue; fi + mkdir -p "$(dirname "${GOPATH}/src/${pkg}")"; + ln -s $x "${GOPATH}/src/${pkg}"; +done +### END HACK HACK HACK + # TODO(menghanl): fix errors in transport_test. -staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore ' -google.golang.org/grpc/balancer.go:SA1019 -google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019 -google.golang.org/grpc/balancer/xds/edsbalancer/balancergroup.go:SA1019 -google.golang.org/grpc/balancer/xds/xds.go:SA1019 -google.golang.org/grpc/balancer_conn_wrappers.go:SA1019 -google.golang.org/grpc/balancer_test.go:SA1019 -google.golang.org/grpc/benchmark/benchmain/main.go:SA1019 -google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019 -google.golang.org/grpc/clientconn.go:S1024 -google.golang.org/grpc/clientconn_state_transition_test.go:SA1019 -google.golang.org/grpc/clientconn_test.go:SA1019 -google.golang.org/grpc/internal/transport/handler_server.go:SA1019 -google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019 -google.golang.org/grpc/resolver/dns/dns_resolver.go:SA1019 -google.golang.org/grpc/stats/stats_test.go:SA1019 -google.golang.org/grpc/test/channelz_test.go:SA1019 -google.golang.org/grpc/test/end2end_test.go:SA1019 -google.golang.org/grpc/test/healthcheck_test.go:SA1019 +staticcheck -go 1.9 -ignore ' +balancer.go:SA1019 +balancer_test.go:SA1019 +clientconn_test.go:SA1019 +balancer/roundrobin/roundrobin_test.go:SA1019 +benchmark/benchmain/main.go:SA1019 +internal/transport/handler_server.go:SA1019 +internal/transport/handler_server_test.go:SA1019 +internal/transport/transport_test.go:SA2002 +stats/stats_test.go:SA1019 +test/channelz_test.go:SA1019 +test/end2end_test.go:SA1019 +test/healthcheck_test.go:SA1019 ' ./... misspell -error . diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 072e7392b1..438eb3beda 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -70,11 +70,6 @@ type Config struct { // TODO: demonstrate an OAuth2 compatible client. BearerToken string - // Path to a file containing a BearerToken. - // If set, the contents are periodically read. - // The last successfully read value takes precedence over BearerToken. - BearerTokenFile string - // Impersonate is the configuration that RESTClient will use for impersonation. 
Impersonate ImpersonationConfig @@ -327,8 +322,9 @@ func InClusterConfig() (*Config, error) { return nil, ErrNotInCluster } - token, err := ioutil.ReadFile(tokenFile) - if err != nil { + ts := NewCachedFileTokenSource(tokenFile) + + if _, err := ts.Token(); err != nil { return nil, err } @@ -344,8 +340,7 @@ func InClusterConfig() (*Config, error) { // TODO: switch to using cluster DNS. Host: "https://" + net.JoinHostPort(host, port), TLSClientConfig: tlsClientConfig, - BearerToken: string(token), - BearerTokenFile: tokenFile, + WrapTransport: TokenSourceWrapTransport(ts), }, nil } @@ -435,13 +430,12 @@ func AnonymousClientConfig(config *Config) *Config { // CopyConfig returns a copy of the given config func CopyConfig(config *Config) *Config { return &Config{ - Host: config.Host, - APIPath: config.APIPath, - ContentConfig: config.ContentConfig, - Username: config.Username, - Password: config.Password, - BearerToken: config.BearerToken, - BearerTokenFile: config.BearerTokenFile, + Host: config.Host, + APIPath: config.APIPath, + ContentConfig: config.ContentConfig, + Username: config.Username, + Password: config.Password, + BearerToken: config.BearerToken, Impersonate: ImpersonationConfig{ Groups: config.Impersonate.Groups, Extra: config.Impersonate.Extra, diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/rest/token_source.go similarity index 87% rename from vendor/k8s.io/client-go/transport/token_source.go rename to vendor/k8s.io/client-go/rest/token_source.go index 8595df2716..c251b5eb0b 100644 --- a/vendor/k8s.io/client-go/transport/token_source.go +++ b/vendor/k8s.io/client-go/rest/token_source.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package transport +package rest import ( "fmt" @@ -47,14 +47,14 @@ func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) htt func NewCachedFileTokenSource(path string) oauth2.TokenSource { return &cachingTokenSource{ now: time.Now, - leeway: 10 * time.Second, + leeway: 1 * time.Minute, base: &fileTokenSource{ path: path, - // This period was picked because it is half of the duration between when the kubelet - // refreshes a projected service account token and when the original token expires. - // Default token lifetime is 10 minutes, and the kubelet starts refreshing at 80% of lifetime. - // This should induce re-reading at a frequency that works with the token volume source. - period: time.Minute, + // This period was picked because it is half of the minimum validity + // duration for a token provisioned by they TokenRequest API. This is + // unsophisticated and should induce rotation at a frequency that should + // work with the token volume source. 
+ period: 5 * time.Minute, }, } } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index a7b8c1c6e4..dea229c918 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -229,12 +229,11 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI if len(configAuthInfo.Token) > 0 { mergedConfig.BearerToken = configAuthInfo.Token } else if len(configAuthInfo.TokenFile) > 0 { - tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) - if err != nil { + ts := restclient.NewCachedFileTokenSource(configAuthInfo.TokenFile) + if _, err := ts.Token(); err != nil { return nil, err } - mergedConfig.BearerToken = string(tokenBytes) - mergedConfig.BearerTokenFile = configAuthInfo.TokenFile + mergedConfig.WrapTransport = restclient.TokenSourceWrapTransport(ts) } if len(configAuthInfo.Impersonate) > 0 { mergedConfig.Impersonate = restclient.ImpersonationConfig{ diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index acb126d8b0..4081c23e7f 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -39,11 +39,6 @@ type Config struct { // Bearer token for authentication BearerToken string - // Path to a file containing a BearerToken. - // If set, the contents are periodically read. - // The last successfully read value takes precedence over BearerToken. - BearerTokenFile string - // Impersonate is the config that this Config will impersonate using Impersonate ImpersonationConfig @@ -85,7 +80,7 @@ func (c *Config) HasBasicAuth() bool { // HasTokenAuth returns whether the configuration has token authentication or not. func (c *Config) HasTokenAuth() bool { - return len(c.BearerToken) != 0 || len(c.BearerTokenFile) != 0 + return len(c.BearerToken) != 0 } // HasCertAuth returns whether the configuration has certificate authentication or not. diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 117a9c8c4d..da417cf96e 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -22,7 +22,6 @@ import ( "strings" "time" - "golang.org/x/oauth2" "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" @@ -45,11 +44,7 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip case config.HasBasicAuth() && config.HasTokenAuth(): return nil, fmt.Errorf("username/password or bearer token may be set, but not both") case config.HasTokenAuth(): - var err error - rt, err = NewBearerAuthWithRefreshRoundTripper(config.BearerToken, config.BearerTokenFile, rt) - if err != nil { - return nil, err - } + rt = NewBearerAuthRoundTripper(config.BearerToken, rt) case config.HasBasicAuth(): rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) } @@ -270,35 +265,13 @@ func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { r type bearerAuthRoundTripper struct { bearer string - source oauth2.TokenSource rt http.RoundTripper } // NewBearerAuthRoundTripper adds the provided bearer token to a request // unless the authorization header has already been set. 
func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { - return &bearerAuthRoundTripper{bearer, nil, rt} -} - -// NewBearerAuthRoundTripper adds the provided bearer token to a request -// unless the authorization header has already been set. -// If tokenFile is non-empty, it is periodically read, -// and the last successfully read content is used as the bearer token. -// If tokenFile is non-empty and bearer is empty, the tokenFile is read -// immediately to populate the initial bearer token. -func NewBearerAuthWithRefreshRoundTripper(bearer string, tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) { - if len(tokenFile) == 0 { - return &bearerAuthRoundTripper{bearer, nil, rt}, nil - } - source := NewCachedFileTokenSource(tokenFile) - if len(bearer) == 0 { - token, err := source.Token() - if err != nil { - return nil, err - } - bearer = token.AccessToken - } - return &bearerAuthRoundTripper{bearer, source, rt}, nil + return &bearerAuthRoundTripper{bearer, rt} } func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -307,13 +280,7 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, } req = utilnet.CloneRequest(req) - token := rt.bearer - if rt.source != nil { - if refreshedToken, err := rt.source.Token(); err == nil { - token = refreshedToken.AccessToken - } - } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) return rt.rt.RoundTrip(req) } diff --git a/vendor/k8s.io/cluster-bootstrap/token/util/helpers.go b/vendor/k8s.io/cluster-bootstrap/token/util/helpers.go index 5565cb26d8..f9ea35b5ee 100644 --- a/vendor/k8s.io/cluster-bootstrap/token/util/helpers.go +++ b/vendor/k8s.io/cluster-bootstrap/token/util/helpers.go @@ -27,6 +27,8 @@ import ( "k8s.io/cluster-bootstrap/token/api" ) +// TODO(dixudx): refactor this to util/secrets and util/tokens + // validBootstrapTokenChars defines the characters a bootstrap token can consist of const validBootstrapTokenChars = "0123456789abcdefghijklmnopqrstuvwxyz" @@ -110,6 +112,7 @@ func BootstrapTokenSecretName(tokenID string) string { // ValidateBootstrapGroupName checks if the provided group name is a valid // bootstrap group name. Returns nil if valid or a validation error if invalid. +// TODO(dixudx): should be moved to util/secrets func ValidateBootstrapGroupName(name string) error { if BootstrapGroupRegexp.Match([]byte(name)) { return nil diff --git a/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go b/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go index 40f1306d5d..4548108e8b 100644 --- a/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go +++ b/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go @@ -718,7 +718,7 @@ func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) { } if !ut.Key.IsAssignable() { - klog.Fatalf("Hit an unsupported type %v for: %v", uet, t) + klog.Fatalf("Hit an unsupported type %v.", uet) } sw.Do("*out = make($.|raw$, len(*in))\n", t) @@ -745,10 +745,6 @@ func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) { case uet.IsAssignable(): sw.Do("(*out)[key] = val\n", nil) case uet.Kind == types.Interface: - // Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function - if uet.Name.Name == "interface{}" { - klog.Fatalf("DeepCopy of %q is unsupported. 
Instead, use named interfaces with DeepCopy as one of the methods.", uet.Name.Name) - } sw.Do("if val == nil {(*out)[key]=nil} else {\n", nil) // Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it // as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang @@ -765,7 +761,7 @@ func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) { case uet.Kind == types.Struct: sw.Do("(*out)[key] = *val.DeepCopy()\n", uet) default: - klog.Fatalf("Hit an unsupported type %v for %v", uet, t) + klog.Fatalf("Hit an unsupported type %v.", uet) } sw.Do("}\n", nil) } @@ -797,10 +793,6 @@ func (g *genDeepCopy) doSlice(t *types.Type, sw *generator.SnippetWriter) { g.generateFor(ut.Elem, sw) sw.Do("}\n", nil) } else if uet.Kind == types.Interface { - // Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function - if uet.Name.Name == "interface{}" { - klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy as one of the methods.", uet.Name.Name) - } sw.Do("if (*in)[i] != nil {\n", nil) // Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it // as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang @@ -810,7 +802,7 @@ func (g *genDeepCopy) doSlice(t *types.Type, sw *generator.SnippetWriter) { } else if uet.Kind == types.Struct { sw.Do("(*in)[i].DeepCopyInto(&(*out)[i])\n", nil) } else { - klog.Fatalf("Hit an unsupported type %v for %v", uet, t) + klog.Fatalf("Hit an unsupported type %v.", uet) } sw.Do("}\n", nil) } @@ -871,10 +863,6 @@ func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) { sw.Do("in.$.name$.DeepCopyInto(&out.$.name$)\n", args) } case uft.Kind == types.Interface: - // Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function - if uft.Name.Name == "interface{}" { - klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy as one of the methods.", uft.Name.Name) - } sw.Do("if in.$.name$ != nil {\n", args) // Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it // as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang @@ -882,7 +870,7 @@ func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) { sw.Do(fmt.Sprintf("out.$.name$ = in.$.name$.DeepCopy%s()\n", uft.Name.Name), args) sw.Do("}\n", nil) default: - klog.Fatalf("Hit an unsupported type %v for %v, from %v", uft, ft, t) + klog.Fatalf("Hit an unsupported type %v.", uft) } } } @@ -919,6 +907,6 @@ func (g *genDeepCopy) doPointer(t *types.Type, sw *generator.SnippetWriter) { sw.Do("*out = new($.Elem|raw$)\n", ut) sw.Do("(*in).DeepCopyInto(*out)\n", nil) default: - klog.Fatalf("Hit an unsupported type %v for %v", uet, t) + klog.Fatalf("Hit an unsupported type %v.", uet) } } diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml index 0f508dae66..5677664c21 100644 --- a/vendor/k8s.io/klog/.travis.yml +++ b/vendor/k8s.io/klog/.travis.yml @@ -5,11 +5,12 @@ go: - 1.9.x - 1.10.x - 1.11.x + - 1.12.x script: - go get -t -v ./... - diff -u <(echo -n) <(gofmt -d .) - diff -u <(echo -n) <(golint $(go list -e ./...)) - - go tool vet . + - go tool vet . || go vet . - go test -v -race ./... 
install: - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md index bee306f398..841468b4b6 100644 --- a/vendor/k8s.io/klog/README.md +++ b/vendor/k8s.io/klog/README.md @@ -31,7 +31,7 @@ How to use klog - Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags - You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) -- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md)) +- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) ### Coexisting with glog This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod new file mode 100644 index 0000000000..3877d8546a --- /dev/null +++ b/vendor/k8s.io/klog/go.mod @@ -0,0 +1,5 @@ +module k8s.io/klog + +go 1.12 + +require github.com/go-logr/logr v0.1.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum new file mode 100644 index 0000000000..fb64d277a7 --- /dev/null +++ b/vendor/k8s.io/klog/go.sum @@ -0,0 +1,2 @@ +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go index 10330d7ef7..2520ebdaa7 100644 --- a/vendor/k8s.io/klog/klog.go +++ b/vendor/k8s.io/klog/klog.go @@ -20,17 +20,17 @@ // // Basic examples: // -// glog.Info("Prepare to repel boarders") +// klog.Info("Prepare to repel boarders") // -// glog.Fatalf("Initialization failed: %s", err) +// klog.Fatalf("Initialization failed: %s", err) // // See the documentation for the V function for an explanation of these examples: // -// if glog.V(2) { -// glog.Info("Starting transaction...") +// if klog.V(2) { +// klog.Info("Starting transaction...") // } // -// glog.V(2).Infoln("Processed", nItems, "elements") +// klog.V(2).Infoln("Processed", nItems, "elements") // // Log output is buffered and written periodically using Flush. Programs // should call Flush before exiting to guarantee all log output is written. 
@@ -417,6 +417,7 @@ func InitFlags(flagset *flag.FlagSet) { logging.toStderr = true logging.alsoToStderr = false logging.skipHeaders = false + logging.addDirHeader = false logging.skipLogHeaders = false }) @@ -432,6 +433,7 @@ func InitFlags(flagset *flag.FlagSet) { flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") + flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") @@ -500,6 +502,9 @@ type loggingT struct { // If true, do not add the headers to log files skipLogHeaders bool + + // If true, add the file directory to the header + addDirHeader bool } // buffer holds a byte Buffer for reuse. The zero value is ready for use. @@ -585,9 +590,14 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { file = "???" line = 1 } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] + if slash := strings.LastIndex(file, "/"); slash >= 0 { + path := file + file = path[slash+1:] + if l.addDirHeader { + if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { + file = path[dirsep+1:] + } + } } } return l.formatHeader(s, file, line), file, line @@ -736,6 +746,8 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() for s := fatalLog; s >= infoLog; s-- { rb := &redirectBuffer{ w: w, @@ -746,6 +758,8 @@ func SetOutput(w io.Writer) { // SetOutputBySeverity sets the output destination for specific severity func SetOutputBySeverity(name string, w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() sev, ok := severityByName(name) if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) @@ -771,24 +785,38 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { os.Stderr.Write(data) } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) + + if logging.logFile != "" { + // Since we are using a single log file, all of the items in l.file array + // will point to the same file, so just use one of them to write data. + if l.file[infoLog] == nil { + if err := l.createFiles(infoLog); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: l.file[infoLog].Write(data) + } else { + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. 
+ l.exit(err) + } + } + + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } } } if s == fatalLog { @@ -827,7 +855,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo // timeoutFlush calls Flush and returns when it completes or after timeout // elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds +// by Flush may deadlock when klog.Fatal is called from a hook that holds // a lock. func timeoutFlush(timeout time.Duration) { done := make(chan bool, 1) @@ -838,7 +866,7 @@ func timeoutFlush(timeout time.Duration) { select { case <-done: case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) } } @@ -1094,9 +1122,9 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either -// if glog.V(2) { glog.Info("log this") } +// if klog.V(2) { klog.Info("log this") } // or -// glog.V(2).Info("log this") +// klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -1170,7 +1198,7 @@ func InfoDepth(depth int, args ...interface{}) { } // Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { logging.println(infoLog, args...) } @@ -1194,7 +1222,7 @@ func WarningDepth(depth int, args ...interface{}) { } // Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { logging.println(warningLog, args...) } @@ -1218,7 +1246,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { logging.println(errorLog, args...) } @@ -1244,7 +1272,7 @@ func FatalDepth(depth int, args ...interface{}) { // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { logging.println(fatalLog, args...) 
} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS deleted file mode 100644 index 9621a6a3a4..0000000000 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -approvers: -- apelisse diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 5eb957affb..890a39399f 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -92,16 +92,13 @@ func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) { // We believe the schema is a reference, verify that and returns a new // Schema func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) { - // TODO(wrong): a schema with a $ref can have properties. We can ignore them (would be incomplete), but we cannot return an error. if len(s.GetProperties().GetAdditionalProperties()) > 0 { return nil, newSchemaError(path, "unallowed embedded type definition") } - // TODO(wrong): a schema with a $ref can have a type. We can ignore it (would be incomplete), but we cannot return an error. if len(s.GetType().GetValue()) > 0 { return nil, newSchemaError(path, "definition reference can't have a type") } - // TODO(wrong): $refs outside of the definitions are completely valid. We can ignore them (would be incomplete), but we cannot return an error. if !strings.HasPrefix(s.GetXRef(), "#/definitions/") { return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef()) } @@ -130,7 +127,6 @@ func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) return nil, newSchemaError(path, "invalid object type") } var sub Schema - // TODO(incomplete): this misses the boolean case as AdditionalProperties is a bool+schema sum type. if s.GetAdditionalProperties().GetSchema() == nil { sub = &Arbitrary{ BaseSchema: d.parseBaseSchema(s, path), @@ -161,7 +157,6 @@ func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, case Number: // do nothing case Integer: // do nothing case Boolean: // do nothing - // TODO(wrong): this misses "null". Would skip the null case (would be incomplete), but we cannot return an error. default: return nil, newSchemaError(path, "Unknown primitive type: %q", t) } @@ -180,8 +175,6 @@ func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, erro return nil, newSchemaError(path, `array should have type "array"`) } if len(s.GetItems().GetSchema()) != 1 { - // TODO(wrong): Items can have multiple elements. We can ignore Items then (would be incomplete), but we cannot return an error. - // TODO(wrong): "type: array" witohut any items at all is completely valid. return nil, newSchemaError(path, "array should have exactly one sub-item") } sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) @@ -234,8 +227,6 @@ func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, // this function is public, it doesn't leak through the interface. func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { if s.GetXRef() != "" { - // TODO(incomplete): ignoring the rest of s is wrong. 
As long as there are no conflict, everything from s must be considered - // Reference: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#path-item-object return d.parseReference(s, path) } objectTypes := s.GetType().GetValue() @@ -243,15 +234,11 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err case 0: // in the OpenAPI schema served by older k8s versions, object definitions created from structs did not include // the type:object property (they only included the "properties" property), so we need to handle this case - // TODO: validate that we ever published empty, non-nil properties. JSON roundtripping nils them. if s.GetProperties() != nil { - // TODO(wrong): when verifying a non-object later against this, it will be rejected as invalid type. - // TODO(CRD validation schema publishing): we have to filter properties (empty or not) if type=object is not given return d.parseKind(s, path) } else { // Definition has no type and no properties. Treat it as an arbitrary value - // TODO(incomplete): what if it has additionalProperties=false or patternProperties? - // ANSWER: parseArbitrary is less strict than it has to be with patternProperties (which is ignored). So this is correct (of course not complete). + // TODO: what if it has additionalProperties or patternProperties? return d.parseArbitrary(s, path) } case 1: @@ -269,8 +256,6 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err return d.parsePrimitive(s, path) default: // the OpenAPI generator never generates (nor it ever did in the past) OpenAPI type definitions with multiple types - // TODO(wrong): this is rejecting a completely valid OpenAPI spec - // TODO(CRD validation schema publishing): filter these out return nil, newSchemaError(path, "definitions with multiple types aren't supported") } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 5f7d70f267..5080661ff3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,18 +1,14 @@ -# cloud.google.com/go v0.40.0 +# cloud.google.com/go v0.35.1 cloud.google.com/go/compute/metadata -# contrib.go.opencensus.io/exporter/ocagent v0.4.12 +# contrib.go.opencensus.io/exporter/ocagent v0.2.0 contrib.go.opencensus.io/exporter/ocagent -# github.com/Azure/go-autorest/autorest v0.5.0 +# github.com/Azure/go-autorest v11.5.0+incompatible github.com/Azure/go-autorest/autorest -github.com/Azure/go-autorest/autorest/azure -# github.com/Azure/go-autorest/autorest/adal v0.2.0 github.com/Azure/go-autorest/autorest/adal -# github.com/Azure/go-autorest/autorest/date v0.1.0 -github.com/Azure/go-autorest/autorest/date -# github.com/Azure/go-autorest/logger v0.1.0 +github.com/Azure/go-autorest/autorest/azure github.com/Azure/go-autorest/logger -# github.com/Azure/go-autorest/tracing v0.1.0 github.com/Azure/go-autorest/tracing +github.com/Azure/go-autorest/autorest/date # github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 github.com/ajeddeloh/go-json # github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd @@ -21,15 +17,13 @@ github.com/ajeddeloh/yaml github.com/alecthomas/units # github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 github.com/appscode/jsonpatch -# github.com/beorn7/perks v1.0.0 +# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/beorn7/perks/quantile -# github.com/census-instrumentation/opencensus-proto v0.2.0 +# 
github.com/census-instrumentation/opencensus-proto v0.1.0 github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1 -github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1 github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1 -github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1 -github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1 github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1 +github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1 # github.com/coreos/container-linux-config-transpiler v0.9.0 github.com/coreos/container-linux-config-transpiler/config github.com/coreos/container-linux-config-transpiler/config/astyaml @@ -38,11 +32,11 @@ github.com/coreos/container-linux-config-transpiler/config/types github.com/coreos/container-linux-config-transpiler/config/templating github.com/coreos/container-linux-config-transpiler/config/types/util github.com/coreos/container-linux-config-transpiler/internal/util -# github.com/coreos/go-semver v0.3.0 +# github.com/coreos/go-semver v0.2.0 github.com/coreos/go-semver/semver -# github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e +# github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d github.com/coreos/go-systemd/unit -# github.com/coreos/ignition v0.32.0 +# github.com/coreos/ignition v0.33.0 github.com/coreos/ignition/config/v2_2/types github.com/coreos/ignition/config/validate github.com/coreos/ignition/config/validate/astnode @@ -58,35 +52,31 @@ github.com/dgrijalva/jwt-go github.com/ghodss/yaml # github.com/go-logr/logr v0.1.0 github.com/go-logr/logr -# github.com/go-logr/zapr v0.1.1 +# github.com/go-logr/zapr v0.1.0 github.com/go-logr/zapr -# github.com/gobuffalo/envy v1.7.0 +# github.com/gobuffalo/envy v1.6.12 github.com/gobuffalo/envy -# github.com/gogo/protobuf v1.2.1 +# github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2 +github.com/gobuffalo/flect +# github.com/gogo/protobuf v1.2.0 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef +# github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.3.1 +# github.com/golang/protobuf v1.2.0 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -github.com/golang/protobuf/jsonpb -github.com/golang/protobuf/protoc-gen-go/generator -github.com/golang/protobuf/ptypes/struct -github.com/golang/protobuf/protoc-gen-go/descriptor -github.com/golang/protobuf/protoc-gen-go/generator/internal/remap -github.com/golang/protobuf/protoc-gen-go/plugin # github.com/google/btree v1.0.0 github.com/google/btree # github.com/google/gofuzz v1.0.0 github.com/google/gofuzz # github.com/google/uuid v1.1.1 github.com/google/uuid -# github.com/googleapis/gnostic v0.3.0 +# github.com/googleapis/gnostic v0.2.0 github.com/googleapis/gnostic/OpenAPIv2 github.com/googleapis/gnostic/compiler 
github.com/googleapis/gnostic/extensions @@ -125,14 +115,10 @@ github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies github.com/gophercloud/gophercloud/openstack/identity/v2/tenants # github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03 github.com/gophercloud/utils/openstack/clientconfig -# github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 +# github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f github.com/gregjones/httpcache github.com/gregjones/httpcache/diskcache -# github.com/grpc-ecosystem/grpc-gateway v1.8.5 -github.com/grpc-ecosystem/grpc-gateway/runtime -github.com/grpc-ecosystem/grpc-gateway/utilities -github.com/grpc-ecosystem/grpc-gateway/internal -# github.com/hashicorp/golang-lru v0.5.1 +# github.com/hashicorp/golang-lru v0.5.0 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru # github.com/imdario/mergo v0.3.7 @@ -143,15 +129,13 @@ github.com/inconshreveable/mousetrap github.com/joho/godotenv # github.com/json-iterator/go v1.1.6 github.com/json-iterator/go -# github.com/markbates/inflect v1.0.4 -github.com/markbates/inflect # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 -# github.com/onsi/gomega v1.4.3 +# github.com/onsi/gomega v1.5.0 github.com/onsi/gomega/gbytes github.com/onsi/gomega/gexec github.com/onsi/gomega/format @@ -172,63 +156,62 @@ github.com/pborman/uuid github.com/peterbourgon/diskv # github.com/pkg/errors v0.8.1 github.com/pkg/errors -# github.com/prometheus/client_golang v0.9.4 +# github.com/prometheus/client_golang v0.9.2 github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal -# github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 +# github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f github.com/prometheus/client_model/go -# github.com/prometheus/common v0.4.1 +# github.com/prometheus/common v0.1.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg -# github.com/prometheus/procfs v0.0.2 +# github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 github.com/prometheus/procfs -github.com/prometheus/procfs/internal/fs -# github.com/rogpeppe/go-internal v1.3.0 +github.com/prometheus/procfs/nfs +github.com/prometheus/procfs/xfs +github.com/prometheus/procfs/internal/util +# github.com/rogpeppe/go-internal v1.1.0 github.com/rogpeppe/go-internal/modfile github.com/rogpeppe/go-internal/module github.com/rogpeppe/go-internal/semver # github.com/spf13/afero v1.2.2 github.com/spf13/afero github.com/spf13/afero/mem -# github.com/spf13/cobra v0.0.5 +# github.com/spf13/cobra v0.0.3 github.com/spf13/cobra # github.com/spf13/pflag v1.0.3 github.com/spf13/pflag # github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb github.com/vincent-petithory/dataurl -# 
go.opencensus.io v0.21.0 +# go.opencensus.io v0.18.0 go.opencensus.io/plugin/ochttp go.opencensus.io/plugin/ochttp/propagation/tracecontext go.opencensus.io/stats/view go.opencensus.io/trace go.opencensus.io -go.opencensus.io/plugin/ocgrpc -go.opencensus.io/resource -go.opencensus.io/stats -go.opencensus.io/tag go.opencensus.io/trace/tracestate go.opencensus.io/plugin/ochttp/propagation/b3 +go.opencensus.io/stats +go.opencensus.io/tag go.opencensus.io/trace/propagation +go.opencensus.io/exemplar go.opencensus.io/internal/tagencoding -go.opencensus.io/metric/metricdata -go.opencensus.io/metric/metricproducer go.opencensus.io/stats/internal go.opencensus.io/internal go.opencensus.io/trace/internal -# go.uber.org/atomic v1.4.0 +# go.uber.org/atomic v1.3.2 go.uber.org/atomic # go.uber.org/multierr v1.1.0 go.uber.org/multierr -# go.uber.org/zap v1.10.0 +# go.uber.org/zap v1.9.1 go.uber.org/zap go.uber.org/zap/buffer go.uber.org/zap/zapcore go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit -# go4.org v0.0.0-20190313082347-94abd6928b1d +# go4.org v0.0.0-20180809161055-417644f6feb5 go4.org/errorutil # golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 golang.org/x/crypto/ssh/terminal @@ -238,19 +221,19 @@ golang.org/x/net/http/httpguts golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/context/ctxhttp -golang.org/x/net/html/charset golang.org/x/net/context +golang.org/x/net/html/charset golang.org/x/net/html golang.org/x/net/html/atom golang.org/x/net/trace golang.org/x/net/internal/timeseries -# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 +# golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c golang.org/x/oauth2 -golang.org/x/oauth2/google golang.org/x/oauth2/internal +golang.org/x/oauth2/google golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20190423024810-112230192c58 +# golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 golang.org/x/sync/semaphore # golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f golang.org/x/sys/unix @@ -276,53 +259,47 @@ golang.org/x/text/runes golang.org/x/text/internal/language golang.org/x/text/internal/language/compact golang.org/x/text/internal/tag -# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 +# golang.org/x/time v0.0.0-20181108054448-85acf8d2951c golang.org/x/time/rate -# golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 +# golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b golang.org/x/tools/imports -golang.org/x/tools/internal/imports golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/packages golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/module golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/cgo golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/internal/semver golang.org/x/tools/internal/fastwalk golang.org/x/tools/go/internal/gcimporter -# google.golang.org/api v0.6.0 +# google.golang.org/api v0.1.0 google.golang.org/api/support/bundler -# google.golang.org/appengine v1.6.1 -google.golang.org/appengine +# google.golang.org/appengine v1.4.0 google.golang.org/appengine/urlfetch +google.golang.org/appengine google.golang.org/appengine/internal +google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/app_identity google.golang.org/appengine/internal/modules -google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log 
google.golang.org/appengine/internal/remote_api -# google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 -google.golang.org/genproto/googleapis/api/httpbody -google.golang.org/genproto/protobuf/field_mask +# google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.20.1 +# google.golang.org/grpc v1.18.0 google.golang.org/grpc -google.golang.org/grpc/credentials -google.golang.org/grpc/metadata -google.golang.org/grpc/codes -google.golang.org/grpc/grpclog -google.golang.org/grpc/status -google.golang.org/grpc/stats google.golang.org/grpc/balancer google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/codes google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto +google.golang.org/grpc/grpclog google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff -google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/envconfig @@ -330,21 +307,24 @@ google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/transport google.golang.org/grpc/keepalive +google.golang.org/grpc/metadata google.golang.org/grpc/naming google.golang.org/grpc/peer google.golang.org/grpc/resolver google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/passthrough +google.golang.org/grpc/stats +google.golang.org/grpc/status google.golang.org/grpc/tap -google.golang.org/grpc/credentials/internal google.golang.org/grpc/balancer/base +google.golang.org/grpc/credentials/internal google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/internal/syscall # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 # gopkg.in/yaml.v2 v2.2.2 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0 => k8s.io/api v0.0.0-20190222213804-5cb15d344471 +# k8s.io/api v0.0.0-20190814101207-0772a1bdf941 => k8s.io/api v0.0.0-20190222213804-5cb15d344471 k8s.io/api/core/v1 k8s.io/api/autoscaling/v1 k8s.io/api/apps/v1 @@ -378,10 +358,10 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/admission/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0-00010101000000-000000000000 => k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 +# k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions -# k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 +# k8s.io/apimachinery v0.0.0-20190814100815-533d101be9a6 => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/runtime k8s.io/apimachinery/pkg/apis/meta/v1 @@ -395,6 +375,7 @@ k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/labels k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/util/naming @@ -402,7 +383,6 @@ k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/fields -k8s.io/apimachinery/pkg/labels k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/pkg/apis/meta/v1/validation @@ -420,6 +400,7 @@ 
k8s.io/apimachinery/pkg/util/version k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/runtime/serializer/streaming k8s.io/apimachinery/pkg/apis/meta/v1beta1 +k8s.io/apimachinery/pkg/apis/meta/internalversion k8s.io/apimachinery/pkg/util/uuid k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/apimachinery/pkg/util/mergepatch @@ -427,8 +408,7 @@ k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/diff k8s.io/apimachinery/pkg/apis/config -k8s.io/apimachinery/pkg/apis/meta/internalversion -# k8s.io/client-go v0.0.0 => k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 +# k8s.io/client-go v10.0.0+incompatible k8s.io/client-go/kubernetes k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/record @@ -471,6 +451,8 @@ k8s.io/client-go/util/flowcontrol k8s.io/client-go/tools/auth k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/util/homedir +k8s.io/client-go/tools/pager +k8s.io/client-go/util/retry k8s.io/client-go/kubernetes/scheme k8s.io/client-go/tools/leaderelection k8s.io/client-go/tools/leaderelection/resourcelock @@ -496,15 +478,13 @@ k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 k8s.io/client-go/util/connrotation -k8s.io/client-go/tools/pager k8s.io/client-go/util/buffer -k8s.io/client-go/util/retry k8s.io/client-go/util/jsonpath k8s.io/client-go/third_party/forked/golang/template -# k8s.io/cluster-bootstrap v0.0.0 => k8s.io/cluster-bootstrap v0.0.0-20190228181738-e96ff33745e4 +# k8s.io/cluster-bootstrap v0.0.0-20190814110523-aeed25068f87 k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/util -# k8s.io/code-generator v0.0.0 => k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b +# k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b k8s.io/code-generator/cmd/client-gen k8s.io/code-generator/cmd/conversion-gen k8s.io/code-generator/cmd/deepcopy-gen @@ -531,9 +511,9 @@ k8s.io/code-generator/cmd/client-gen/generators/fake k8s.io/code-generator/cmd/client-gen/generators/scheme k8s.io/code-generator/cmd/client-gen/generators/util k8s.io/code-generator/cmd/client-gen/path -# k8s.io/component-base v0.0.0-00010101000000-000000000000 => k8s.io/component-base v0.0.0-20190620085130-185d68e6e6ea +# k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d k8s.io/component-base/cli/flag -# k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a +# k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af k8s.io/gengo/args k8s.io/gengo/examples/deepcopy-gen/generators k8s.io/gengo/examples/defaulter-gen/generators @@ -543,9 +523,9 @@ k8s.io/gengo/namer k8s.io/gengo/types k8s.io/gengo/parser k8s.io/gengo/examples/set-gen/sets -# k8s.io/klog v0.3.3 +# k8s.io/klog v0.4.0 k8s.io/klog -# k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 +# k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 k8s.io/kube-openapi/pkg/util/proto # k8s.io/kubernetes v1.13.4 k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 @@ -558,9 +538,9 @@ k8s.io/kubernetes/pkg/proxy/apis/config k8s.io/kubernetes/pkg/registry/core/service/ipallocator k8s.io/kubernetes/pkg/apis/core k8s.io/kubernetes/pkg/registry/core/service/allocator -# k8s.io/utils v0.0.0-20190801114015-581e00157fb1 +# k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a k8s.io/utils/exec -# sigs.k8s.io/cluster-api v0.1.4 +# sigs.k8s.io/cluster-api v0.1.9 sigs.k8s.io/cluster-api/cmd/clusterctl/cmd sigs.k8s.io/cluster-api/pkg/apis/cluster/common 
 sigs.k8s.io/cluster-api/pkg/apis
@@ -616,7 +596,7 @@ sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics
 sigs.k8s.io/controller-runtime/pkg/webhook/types
 sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics
 sigs.k8s.io/controller-runtime/pkg/internal/objectutil
-# sigs.k8s.io/controller-tools v0.1.11
+# sigs.k8s.io/controller-tools v0.1.12
 sigs.k8s.io/controller-tools/cmd/controller-gen
 sigs.k8s.io/controller-tools/pkg/crd/generator
 sigs.k8s.io/controller-tools/pkg/rbac
diff --git a/vendor/sigs.k8s.io/cluster-api/Dockerfile b/vendor/sigs.k8s.io/cluster-api/Dockerfile
index 5816d9011f..086a6898b9 100644
--- a/vendor/sigs.k8s.io/cluster-api/Dockerfile
+++ b/vendor/sigs.k8s.io/cluster-api/Dockerfile
@@ -13,16 +13,20 @@
 # limitations under the License.

 # Build the manager binary
-FROM golang:1.12.5 as builder
+FROM golang:1.12.6 as builder

 # Copy in the go src
 WORKDIR ${GOPATH}/src/sigs.k8s.io/cluster-api
 COPY pkg/ pkg/
 COPY cmd/ cmd/
 COPY vendor/ vendor/
+COPY go.mod go.mod
+COPY go.sum go.sum

 # Build
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -ldflags '-extldflags "-static"' -o manager sigs.k8s.io/cluster-api/cmd/manager
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} GO111MODULE=on GOFLAGS="-mod=vendor" \
+    go build -a -ldflags '-extldflags "-static"' \
+    -o manager sigs.k8s.io/cluster-api/cmd/manager

 # Copy the controller-manager into a thin image
 FROM gcr.io/distroless/static:latest
diff --git a/vendor/sigs.k8s.io/cluster-api/Gopkg.lock b/vendor/sigs.k8s.io/cluster-api/Gopkg.lock
index 9cdce90d86..a5fda4985c 100644
--- a/vendor/sigs.k8s.io/cluster-api/Gopkg.lock
+++ b/vendor/sigs.k8s.io/cluster-api/Gopkg.lock
@@ -1087,7 +1087,7 @@

 [[projects]]
   branch = "release-1.13"
-  digest = "1:2d4e25d510d99febad7145a01c6fe92037e39cae579d5fc0e23963f7a96d350c"
+  digest = "1:6a5942e71b8f97617b6984f11894f857dbe6ed3d564ad268b649802dc4c7dbcb"
   name = "k8s.io/code-generator"
   packages = [
     "cmd/client-gen",
@@ -1098,14 +1098,23 @@
     "cmd/client-gen/generators/util",
     "cmd/client-gen/path",
     "cmd/client-gen/types",
+    "cmd/conversion-gen",
+    "cmd/conversion-gen/args",
+    "cmd/conversion-gen/generators",
     "cmd/deepcopy-gen",
     "cmd/deepcopy-gen/args",
+    "cmd/defaulter-gen",
+    "cmd/defaulter-gen/args",
     "cmd/informer-gen",
     "cmd/informer-gen/args",
     "cmd/informer-gen/generators",
     "cmd/lister-gen",
     "cmd/lister-gen/args",
     "cmd/lister-gen/generators",
+    "cmd/register-gen",
+    "cmd/register-gen/args",
+    "cmd/register-gen/generators",
+    "cmd/set-gen",
     "pkg/util",
   ]
   pruneopts = "UT"
@@ -1121,11 +1130,13 @@

 [[projects]]
   branch = "master"
-  digest = "1:28514fabca4356625720ffb012408790a9d00d31963a9bd9daf7b5ccd894c301"
+  digest = "1:4b51f480ae0b1b401eb343db1807601fd45f827ec16c4dfe3275e74fd47b78a1"
   name = "k8s.io/gengo"
   packages = [
     "args",
     "examples/deepcopy-gen/generators",
+    "examples/defaulter-gen/generators",
+    "examples/set-gen/generators",
     "examples/set-gen/sets",
     "generator",
     "namer",
@@ -1291,9 +1302,13 @@
     "k8s.io/client-go/util/integer",
     "k8s.io/client-go/util/retry",
     "k8s.io/code-generator/cmd/client-gen",
+    "k8s.io/code-generator/cmd/conversion-gen",
     "k8s.io/code-generator/cmd/deepcopy-gen",
+    "k8s.io/code-generator/cmd/defaulter-gen",
     "k8s.io/code-generator/cmd/informer-gen",
     "k8s.io/code-generator/cmd/lister-gen",
+    "k8s.io/code-generator/cmd/register-gen",
+    "k8s.io/code-generator/cmd/set-gen",
     "k8s.io/component-base/cli/flag",
     "k8s.io/klog",
     "k8s.io/klog/klogr",
diff --git a/vendor/sigs.k8s.io/cluster-api/Makefile b/vendor/sigs.k8s.io/cluster-api/Makefile
index 8d70c23512..68969c6607 100644
--- a/vendor/sigs.k8s.io/cluster-api/Makefile
+++ b/vendor/sigs.k8s.io/cluster-api/Makefile
@@ -22,7 +22,7 @@ export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT ?=60s
 export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT ?=60s

 # Image URL to use all building/pushing image targets
-export CONTROLLER_IMG ?= gcr.io/k8s-cluster-api/cluster-api-controller:v0.1.4
+export CONTROLLER_IMG ?= gcr.io/k8s-staging-cluster-api/cluster-api-controller:v0.1.9
 export EXAMPLE_PROVIDER_IMG ?= gcr.io/k8s-cluster-api/example-provider-controller:latest

 all: test manager clusterctl
@@ -34,12 +34,14 @@ help: ## Display this help
 gazelle: ## Run Bazel Gazelle
	(which bazel && ./hack/update-bazel.sh) || true

-.PHONY: dep-ensure
-dep-ensure: ## Runs dep-ensure and rebuilds Bazel gazelle files.
-	find vendor -name 'BUILD.bazel' -delete
-	(which dep && dep ensure -v) || true
+.PHONY: vendor
+vendor: ## Runs go mod to ensure proper vendoring.
+	./hack/update-vendor.sh
	$(MAKE) gazelle

+bin/%-gen: ./vendor/k8s.io/code-generator/cmd/%-gen ## Build code-generator binaries
+	go build -o $@ ./$<
+
 .PHONY: test
 test: gazelle verify generate fmt vet manifests ## Run tests
	go test -v -tags=integration ./pkg/... ./cmd/...
@@ -75,28 +77,27 @@ vet: ## Run go vet against code
	go vet ./pkg/... ./cmd/...

 .PHONY: lint
-lint: dep-ensure ## Lint codebase
+lint: ## Lint codebase
	bazel run //:lint $(BAZEL_ARGS)

-lint-full: dep-ensure ## Run slower linters to detect possible issues
+lint-full: ## Run slower linters to detect possible issues
	bazel run //:lint-full $(BAZEL_ARGS)

 .PHONY: generate
-generate: clientset dep-ensure ## Generate code
+generate: clientset ## Generate code
	go generate ./pkg/... ./cmd/...
	$(MAKE) gazelle

 .PHONY: clientset
-clientset: ## Generate a typed clientset
+clientset: bin/client-gen bin/lister-gen bin/informer-gen ## Generate a typed clientset
	rm -rf pkg/client
-	cd ./vendor/k8s.io/code-generator/cmd && go install ./client-gen ./lister-gen ./informer-gen
-	$$GOPATH/bin/client-gen --clientset-name clientset --input-base sigs.k8s.io/cluster-api/pkg/apis \
+	bin/client-gen --clientset-name clientset --input-base sigs.k8s.io/cluster-api/pkg/apis \
		--input cluster/v1alpha1 --output-package sigs.k8s.io/cluster-api/pkg/client/clientset_generated \
		--go-header-file=./hack/boilerplate/boilerplate.generatego.txt
-	$$GOPATH/bin/lister-gen --input-dirs sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1 \
+	bin/lister-gen --input-dirs sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1 \
		--output-package sigs.k8s.io/cluster-api/pkg/client/listers_generated \
		--go-header-file=./hack/boilerplate/boilerplate.generatego.txt
-	$$GOPATH/bin/informer-gen --input-dirs sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1 \
+	bin/informer-gen --input-dirs sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1 \
		--versioned-clientset-package sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset \
		--listers-package sigs.k8s.io/cluster-api/pkg/client/listers_generated \
		--output-package sigs.k8s.io/cluster-api/pkg/client/informers_generated \
diff --git a/vendor/sigs.k8s.io/cluster-api/OWNERS_ALIASES b/vendor/sigs.k8s.io/cluster-api/OWNERS_ALIASES
index e7059b003e..30f454c46e 100644
--- a/vendor/sigs.k8s.io/cluster-api/OWNERS_ALIASES
+++ b/vendor/sigs.k8s.io/cluster-api/OWNERS_ALIASES
@@ -12,4 +12,5 @@ aliases:
   cluster-api-maintainers:
   - justinsb
   - detiber
+  - ncdc
   - vincepri
diff --git a/vendor/sigs.k8s.io/cluster-api/config/default/kustomization.yaml b/vendor/sigs.k8s.io/cluster-api/config/default/kustomization.yaml
index edf8c7b3a3..9ff239f49f 100644
--- a/vendor/sigs.k8s.io/cluster-api/config/default/kustomization.yaml
+++ b/vendor/sigs.k8s.io/cluster-api/config/default/kustomization.yaml
@@ -13,5 +13,5 @@ bases:
 - ../rbac/
 - ../manager/

-patches:
+patchesStrategicMerge:
 - manager_image_patch.yaml
diff --git a/vendor/sigs.k8s.io/cluster-api/config/default/manager_image_patch.yaml b/vendor/sigs.k8s.io/cluster-api/config/default/manager_image_patch.yaml
index a2ba4568c5..5405548e2a 100644
--- a/vendor/sigs.k8s.io/cluster-api/config/default/manager_image_patch.yaml
+++ b/vendor/sigs.k8s.io/cluster-api/config/default/manager_image_patch.yaml
@@ -7,5 +7,5 @@ spec:
   template:
     spec:
       containers:
-      - image: gcr.io/k8s-cluster-api/cluster-api-controller:v0.1.4
+      - image: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.1.9
         name: manager
diff --git a/vendor/sigs.k8s.io/cluster-api/docs/developer/releasing.md b/vendor/sigs.k8s.io/cluster-api/docs/developer/releasing.md
index 57f7206143..34209dc776 100644
--- a/vendor/sigs.k8s.io/cluster-api/docs/developer/releasing.md
+++ b/vendor/sigs.k8s.io/cluster-api/docs/developer/releasing.md
@@ -9,12 +9,17 @@
 ### Artifact locations

-1. The container image is found in the registry `gcr.io/k8s-cluster-api` with an
-   image name of `cluster-api-controller` and a tag that matches the release
-   version. For example, in the `v0.1.4` release, the container image
-   location is `gcr.io/k8s-cluster-api/cluster-api-controller:v0.1.4`
+1. The container image is found in the registry `us.gcr.io/k8s-artifacts-prod/cluster-api/` with an image
+   name of `cluster-api-controller` and a tag that matches the release version. For
+   example, in the `v0.1.5` release, the container image location is
+   `us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.1.5`

-1. Prior to the `v0.1.4` release, the container image is found in the
+2. Prior to the `v0.1.5` release, the container image is found in the registry
+   `gcr.io/k8s-cluster-api` with an image name of `cluster-api-controller` and a tag
+   that matches the release version. For example, in the `v0.1.4` release, the container
+   image location is `gcr.io/k8s-cluster-api/cluster-api-controller:v0.1.4`
+
+3. Prior to the `v0.1.4` release, the container image is found in the
    registry `gcr.io/k8s-cluster-api` with an image name of
    `cluster-api-controller` and a tag that matches the release version. For
    example, in the `0.1.3` release, the container image location is
    `gcr.io/k8s-cluster-api/cluster-api-controller:0.1.3`
@@ -23,26 +28,19 @@ For version v0.x.y:

-1. We will target a branch called `release-0.x`. If this is `v0.x.0` then we'll
-   create a branch from master using `git push origin master:release-0.x`, otherwise
-   simply checkout the existing branch `git checkout release-0.x`
+1. We will target a branch called `release-0.x`. If this is `v0.x.0` then we'll create a branch from master using `git push origin master:release-0.x`, otherwise simply checkout the existing branch `git checkout release-0.x`

 2. Make two changes:
-   1. Change [the cluster api controller manager image
-      tag][managerimg] from `:latest` to whatever version is being released
-   1. Change the `CONTROLLER_IMG` variable in the [Makefile][makefile] to the
-      version being released
-      (Note that we do not release the example-provider image, so we don't tag that)
+   1. Change [the cluster api controller manager image tag][managerimg] from `:latest` to whatever version is being released
+   1. Change the `CONTROLLER_IMG` variable in the [Makefile][makefile] to the version being released (Note that we do not release the example-provider image, so we don't tag that)

 3. Commit it using `git commit -m "Release v0.x.y"`
 4. Submit a PR to the `release-0.x` branch, e.g. `git push $USER; hub pull-request -b release-0.x`
 5. Get the pull request merged
-6. Switch to the release branch and update to pick up the commit. (e.g. `git
-   checkout release 0.x && git pull`). From there build and push the container
-   images and fat manifest with `REGISTRY="gcr.io/k8s-cluster-api" make all-push` (on the 0.1 release branch, we
-   do `make docker-push`)
+6. Switch to the release branch and update to pick up the commit. (e.g. `git checkout release 0.x && git pull`).
 7. Create an annotated tag from this same commit `git tag -a v0.x.y -m v0.x.y` and push the tag to the github repository `git push origin v0.x.y`
-8. Create a release in github based on the tag created above
-9. Manually create the release notes by going through the merged PRs since the
-   last release
+8. Build and push the container images and fat manifest with `REGISTRY="gcr.io/k8s-staging-cluster-api" make all-push` (on the 0.1 release branch, we do `make docker-push`)
+9. Follow the [Image Promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter) to promote the image from the staging repo to `k8s.gcr.io/cluster-api`
+10. Create a release in github based on the tag created above
+11. Manually create the release notes by going through the merged PRs since the last release

 [managerimg]: https://github.com/kubernetes-sigs/cluster-api/blob/fab4c07ea9fb0f124a5abe3dd7fcfffc23f2a1b3/config/default/manager_image_patch.yaml
 [makefile]: https://github.com/kubernetes-sigs/cluster-api/blob/fab4c07ea9fb0f124a5abe3dd7fcfffc23f2a1b3/Makefile
diff --git a/vendor/sigs.k8s.io/cluster-api/go.mod b/vendor/sigs.k8s.io/cluster-api/go.mod
new file mode 100644
index 0000000000..c2ff9acd5a
--- /dev/null
+++ b/vendor/sigs.k8s.io/cluster-api/go.mod
@@ -0,0 +1,69 @@
+module sigs.k8s.io/cluster-api
+
+go 1.12
+
+require (
+	cloud.google.com/go v0.35.1 // indirect
+	contrib.go.opencensus.io/exporter/ocagent v0.2.0 // indirect
+	github.com/Azure/go-autorest v11.5.0+incompatible // indirect
+	github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 // indirect
+	github.com/census-instrumentation/opencensus-proto v0.1.0 // indirect
+	github.com/davecgh/go-spew v1.1.1
+	github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
+	github.com/evanphx/json-patch v4.2.0+incompatible // indirect
+	github.com/go-logr/logr v0.1.0 // indirect
+	github.com/go-logr/zapr v0.1.0 // indirect
+	github.com/gobuffalo/envy v1.6.12 // indirect
+	github.com/gogo/protobuf v1.2.0 // indirect
+	github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff // indirect
+	github.com/google/btree v1.0.0 // indirect
+	github.com/google/go-cmp v0.3.0 // indirect
+	github.com/google/gofuzz v1.0.0 // indirect
+	github.com/google/uuid v1.1.1 // indirect
+	github.com/googleapis/gnostic v0.2.0 // indirect
+	github.com/gophercloud/gophercloud v0.0.0-20190221164956-3f3cc5a566b2 // indirect
+	github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f // indirect
+	github.com/hashicorp/golang-lru v0.5.0 // indirect
+	github.com/imdario/mergo v0.3.7 // indirect
+	github.com/json-iterator/go v1.1.6 // indirect
+	github.com/markbates/deplist v1.0.5 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.1 // indirect
+	github.com/onsi/ginkgo v1.8.0
+	github.com/onsi/gomega v1.5.0
+	github.com/pborman/uuid v1.2.0 // indirect
+	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
+	github.com/pkg/errors v0.8.1
+	github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f // indirect
+	github.com/prometheus/common v0.1.0 // indirect
+	github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 // indirect
+	github.com/sergi/go-diff v1.0.0
+	github.com/spf13/cobra v0.0.3
+	github.com/spf13/pflag v1.0.3
+	go.uber.org/atomic v1.3.2 // indirect
+	go.uber.org/multierr v1.1.0 // indirect
+	go.uber.org/zap v1.9.1 // indirect
+	golang.org/x/net v0.0.0-20190613194153-d28f0bde5980
+	golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c // indirect
+	golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f // indirect
+	golang.org/x/text v0.3.2 // indirect
+	golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b // indirect
+	google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 // indirect
+	google.golang.org/grpc v1.18.0 // indirect
+	k8s.io/api v0.0.0-20190222213804-5cb15d344471
+	k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 // indirect
+	k8s.io/apimachinery v0.0.0-20190703205208-4cfb76a8bf76
+	k8s.io/apiserver v0.0.0-20190122042701-b6aa1175dafa
+	k8s.io/client-go v10.0.0+incompatible
+	k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b
+	k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d
+	k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af // indirect
+	k8s.io/klog v0.3.2
+	k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 // indirect
+	sigs.k8s.io/controller-runtime v0.1.12
+	sigs.k8s.io/controller-tools v0.1.11
+	sigs.k8s.io/testing_frameworks v0.1.1
+	sigs.k8s.io/yaml v1.1.0 // indirect
+)
+
+replace k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628
diff --git a/vendor/sigs.k8s.io/cluster-api/go.sum b/vendor/sigs.k8s.io/cluster-api/go.sum
new file mode 100644
index 0000000000..40bf86c96f
--- /dev/null
+++ b/vendor/sigs.k8s.io/cluster-api/go.sum
@@ -0,0 +1,646 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.35.1 h1:LMe/Btq0Eijsc97JyBwMc0KMXOe0orqAMdg7/EkywN8=
+cloud.google.com/go v0.35.1/go.mod h1:wfjPZNvXCBYESy3fIynybskMP48KVPrjSPCnXiK7Prg=
+contrib.go.opencensus.io/exporter/ocagent v0.2.0 h1:Q/jXnVbliDYozuWJni9452xsSUuo+y8yrioxRgofBhE=
+contrib.go.opencensus.io/exporter/ocagent v0.2.0/go.mod h1:0fnkYHF+ORKj7HWzOExKkUHeFX79gXSKUQbpnAM+wzo=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
+dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/Azure/go-autorest v11.5.0+incompatible h1:zp9GQJhEX+EBqEYC2MEGQ+gjKFEPRAWtfwcmstS2hGk= +github.com/Azure/go-autorest v11.5.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= +github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.1.0 h1:VwZ9smxzX8u14/125wHIX7ARV+YhR+L4JADswwxWK0Y= +github.com/census-instrumentation/opencensus-proto v0.1.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY= +github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4= +github.com/gobuffalo/buffalo-plugins v1.0.2/go.mod h1:pOp/uF7X3IShFHyobahTkTLZaeUXwb0GrUTb9ngJWTs= +github.com/gobuffalo/buffalo-plugins v1.0.4/go.mod h1:pWS1vjtQ6uD17MVFWf7i3zfThrEKWlI5+PYLw/NaDB4= +github.com/gobuffalo/buffalo-plugins v1.4.3/go.mod h1:uCzTY0woez4nDMdQjkcOYKanngeUVRO2HZi7ezmAjWY= +github.com/gobuffalo/buffalo-plugins v1.5.1/go.mod h1:jbmwSZK5+PiAP9cC09VQOrGMZFCa/P0UMlIS3O12r5w= +github.com/gobuffalo/buffalo-plugins v1.6.4/go.mod h1:/+N1aophkA2jZ1ifB2O3Y9yGwu6gKOVMtUmJnbg+OZI= +github.com/gobuffalo/buffalo-plugins v1.6.5/go.mod h1:0HVkbgrVs/MnPZ/FOseDMVanCTm2RNcdM0PuXcL1NNI= +github.com/gobuffalo/buffalo-plugins v1.6.7/go.mod h1:ZGZRkzz2PiKWHs0z7QsPBOTo2EpcGRArMEym6ghKYgk= +github.com/gobuffalo/buffalo-plugins v1.6.9/go.mod h1:yYlYTrPdMCz+6/+UaXg5Jm4gN3xhsvsQ2ygVatZV5vw= +github.com/gobuffalo/buffalo-plugins v1.6.11/go.mod h1:eAA6xJIL8OuynJZ8amXjRmHND6YiusVAaJdHDN1Lu8Q= +github.com/gobuffalo/buffalo-plugins v1.8.2/go.mod h1:9te6/VjEQ7pKp7lXlDIMqzxgGpjlKoAcAANdCgoR960= +github.com/gobuffalo/buffalo-plugins v1.8.3/go.mod h1:IAWq6vjZJVXebIq2qGTLOdlXzmpyTZ5iJG5b59fza5U= +github.com/gobuffalo/buffalo-plugins v1.9.4/go.mod h1:grCV6DGsQlVzQwk6XdgcL3ZPgLm9BVxlBmXPMF8oBHI= 
+github.com/gobuffalo/buffalo-plugins v1.10.0/go.mod h1:4osg8d9s60txLuGwXnqH+RCjPHj9K466cDFRl3PErHI= +github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8= +github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc= +github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.7/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.8/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.9/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.10/go.mod h1:X0CFllQjTV5ogsnUrg+Oks2yTI+PU2dGYBJOEI2D1Uo= +github.com/gobuffalo/envy v1.6.11/go.mod h1:Fiq52W7nrHGDggFPhn2ZCcHw4u/rqXkqo+i7FB6EAcg= +github.com/gobuffalo/envy v1.6.12 h1:zkhss8DXz/pty2HAyA8BnvWMTYxo4gjd4+WCnYovoxY= +github.com/gobuffalo/envy v1.6.12/go.mod h1:qJNrJhKkZpEW0glh5xP2syQHH5kgdmgsKss2Kk8PTP0= +github.com/gobuffalo/events v1.0.3/go.mod h1:Txo8WmqScapa7zimEQIwgiJBvMECMe9gJjsKNPN3uZw= +github.com/gobuffalo/events v1.0.7/go.mod h1:z8txf6H9jWhQ5Scr7YPLWg/cgXBRj8Q4uYI+rsVCCSQ= +github.com/gobuffalo/events v1.0.8/go.mod h1:A5KyqT1sA+3GJiBE4QKZibse9mtOcI9nw8gGrDdqYGs= +github.com/gobuffalo/events v1.1.3/go.mod h1:9yPGWYv11GENtzrIRApwQRMYSbUgCsZ1w6R503fCfrk= +github.com/gobuffalo/events v1.1.4/go.mod h1:09/YRRgZHEOts5Isov+g9X2xajxdvOAcUuAHIX/O//A= +github.com/gobuffalo/events v1.1.5/go.mod h1:3YUSzgHfYctSjEjLCWbkXP6djH2M+MLaVRzb4ymbAK0= +github.com/gobuffalo/events v1.1.7/go.mod h1:6fGqxH2ing5XMb3EYRq9LEkVlyPGs4oO/eLzh+S8CxY= +github.com/gobuffalo/events v1.1.8/go.mod h1:UFy+W6X6VbCWS8k2iT81HYX65dMtiuVycMy04cplt/8= +github.com/gobuffalo/events v1.1.9/go.mod h1:/0nf8lMtP5TkgNbzYxR6Bl4GzBy5s5TebgNTdRfRbPM= +github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc= +github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181018182602-fd24a256709f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181019110701-3d6f0b585514/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181024204909-8f6be1a8c6c2/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181104133451-1f6e9779237a/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI= +github.com/gobuffalo/flect v0.0.0-20181210151238-24a2b68e0316/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= +github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= +github.com/gobuffalo/genny v0.0.0-20180924032338-7af3a40f2252/go.mod h1:tUTQOogrr7tAQnhajMSH6rv1BVev34H2sa1xNHMy94g= +github.com/gobuffalo/genny v0.0.0-20181003150629-3786a0744c5d/go.mod 
h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= +github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= +github.com/gobuffalo/genny v0.0.0-20181007153042-b8de7d566757/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181012161047-33e5f43d83a6/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181017160347-90a774534246/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181024195656-51392254bf53/go.mod h1:o9GEH5gn5sCKLVB5rHFC4tq40rQ3VRUzmx6WwmaqISE= +github.com/gobuffalo/genny v0.0.0-20181025145300-af3f81d526b8/go.mod h1:uZ1fFYvdcP8mu0B/Ynarf6dsGvp7QFIpk/QACUuFUVI= +github.com/gobuffalo/genny v0.0.0-20181027191429-94d6cfb5c7fc/go.mod h1:x7SkrQQBx204Y+O9EwRXeszLJDTaWN0GnEasxgLrQTA= +github.com/gobuffalo/genny v0.0.0-20181027195209-3887b7171c4f/go.mod h1:JbKx8HSWICu5zyqWOa0dVV1pbbXOHusrSzQUprW6g+w= +github.com/gobuffalo/genny v0.0.0-20181106193839-7dcb0924caf1/go.mod h1:x61yHxvbDCgQ/7cOAbJCacZQuHgB0KMSzoYcw5debjU= +github.com/gobuffalo/genny v0.0.0-20181107223128-f18346459dbe/go.mod h1:utQD3aKKEsdb03oR+Vi/6ztQb1j7pO10N3OBoowRcSU= +github.com/gobuffalo/genny v0.0.0-20181114215459-0a4decd77f5d/go.mod h1:kN2KZ8VgXF9VIIOj/GM0Eo7YK+un4Q3tTreKOf0q1ng= +github.com/gobuffalo/genny v0.0.0-20181119162812-e8ff4adce8bb/go.mod h1:BA9htSe4bZwBDJLe8CUkoqkypq3hn3+CkoHqVOW718E= +github.com/gobuffalo/genny v0.0.0-20181127225641-2d959acc795b/go.mod h1:l54xLXNkteX/PdZ+HlgPk1qtcrgeOr3XUBBPDbH+7CQ= +github.com/gobuffalo/genny v0.0.0-20181128191930-77e34f71ba2a/go.mod h1:FW/D9p7cEEOqxYA71/hnrkOWm62JZ5ZNxcNIVJEaWBU= +github.com/gobuffalo/genny v0.0.0-20181203165245-fda8bcce96b1/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181203201232-849d2c9534ea/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181206121324-d6fb8a0dbe36/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181207164119-84844398a37d/go.mod h1:y0ysCHGGQf2T3vOhCrGHheYN54Y/REj0ayd0Suf4C/8= +github.com/gobuffalo/genny v0.0.0-20181211165820-e26c8466f14d/go.mod h1:sHnK+ZSU4e2feXP3PA29ouij6PUEiN+RCwECjCTB3yM= +github.com/gobuffalo/genny v0.0.0-20190104222617-a71664fc38e7/go.mod h1:QPsQ1FnhEsiU8f+O0qKWXz2RE4TiDqLVChWkBuh1WaY= +github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5/go.mod h1:CIaHCrSIuJ4il6ka3Hub4DR4adDrGoXGEEt2FbBxoIo= +github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt3/DSalaIXbb0De/dmTqMQdkQ4I= +github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY= +github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI= +github.com/gobuffalo/httptest v1.0.2/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= +github.com/gobuffalo/licenser v0.0.0-20180924033006-eae28e638a42/go.mod h1:Ubo90Np8gpsSZqNScZZkVXXAo5DGhTb+WYFIjlnog8w= +github.com/gobuffalo/licenser v0.0.0-20181025145548-437d89de4f75/go.mod h1:x3lEpYxkRG/XtGCUNkio+6RZ/dlOvLzTI9M1auIwFcw= +github.com/gobuffalo/licenser v0.0.0-20181027200154-58051a75da95/go.mod h1:BzhaaxGd1tq1+OLKObzgdCV9kqVhbTulxOpYbvMQWS0= +github.com/gobuffalo/licenser v0.0.0-20181109171355-91a2a7aac9a7/go.mod 
h1:m+Ygox92pi9bdg+gVaycvqE8RVSjZp7mWw75+K5NPHk= +github.com/gobuffalo/licenser v0.0.0-20181128165715-cc7305f8abed/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE= +github.com/gobuffalo/licenser v0.0.0-20181203160806-fe900bbede07/go.mod h1:ph6VDNvOzt1CdfaWC+9XwcBnlSTBz2j49PBwum6RFaU= +github.com/gobuffalo/licenser v0.0.0-20181211173111-f8a311c51159/go.mod h1:ve/Ue99DRuvnTaLq2zKa6F4KtHiYf7W046tDjuGYPfM= +github.com/gobuffalo/logger v0.0.0-20181022175615-46cfb361fc27/go.mod h1:8sQkgyhWipz1mIctHF4jTxmJh1Vxhp7mP8IqbljgJZo= +github.com/gobuffalo/logger v0.0.0-20181027144941-73d08d2bb969/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181027193913-9cf4dd0efe46/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181109185836-3feeab578c17/go.mod h1:oNErH0xLe+utO+OW8ptXMSA5DkiSEDW1u3zGIt8F9Ew= +github.com/gobuffalo/logger v0.0.0-20181117211126-8e9b89b7c264/go.mod h1:5etB91IE0uBlw9k756fVKZJdS+7M7ejVhmpXXiSFj0I= +github.com/gobuffalo/logger v0.0.0-20181127160119-5b956e21995c/go.mod h1:+HxKANrR9VGw9yN3aOAppJKvhO05ctDi63w4mDnKv2U= +github.com/gobuffalo/makr v1.1.5/go.mod h1:Y+o0btAH1kYAMDJW/TX3+oAXEu0bmSLLoC9mIFxtzOw= +github.com/gobuffalo/mapi v1.0.0/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/meta v0.0.0-20181018155829-df62557efcd3/go.mod h1:XTTOhwMNryif3x9LkTTBO/Llrveezd71u3quLd0u7CM= +github.com/gobuffalo/meta v0.0.0-20181018192820-8c6cef77dab3/go.mod h1:E94EPzx9NERGCY69UWlcj6Hipf2uK/vnfrF4QD0plVE= +github.com/gobuffalo/meta v0.0.0-20181025145500-3a985a084b0a/go.mod h1:YDAKBud2FP7NZdruCSlmTmDOZbVSa6bpK7LJ/A/nlKg= +github.com/gobuffalo/meta v0.0.0-20181114191255-b130ebedd2f7/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE= +github.com/gobuffalo/meta v0.0.0-20181127070345-0d7e59dd540b/go.mod h1:RLO7tMvE0IAKAM8wny1aN12pvEKn7EtkBLkUZR00Qf8= +github.com/gobuffalo/mw-basicauth v1.0.3/go.mod h1:dg7+ilMZOKnQFHDefUzUHufNyTswVUviCBgF244C1+0= +github.com/gobuffalo/mw-contenttype v0.0.0-20180802152300-74f5a47f4d56/go.mod h1:7EvcmzBbeCvFtQm5GqF9ys6QnCxz2UM1x0moiWLq1No= +github.com/gobuffalo/mw-csrf v0.0.0-20180802151833-446ff26e108b/go.mod h1:sbGtb8DmDZuDUQoxjr8hG1ZbLtZboD9xsn6p77ppcHo= +github.com/gobuffalo/mw-forcessl v0.0.0-20180802152810-73921ae7a130/go.mod h1:JvNHRj7bYNAMUr/5XMkZaDcw3jZhUZpsmzhd//FFWmQ= +github.com/gobuffalo/mw-i18n v0.0.0-20180802152014-e3060b7e13d6/go.mod h1:91AQfukc52A6hdfIfkxzyr+kpVYDodgAeT5cjX1UIj4= +github.com/gobuffalo/mw-paramlogger v0.0.0-20181005191442-d6ee392ec72e/go.mod h1:6OJr6VwSzgJMqWMj7TYmRUqzNe2LXu/W1rRW4MAz/ME= +github.com/gobuffalo/mw-tokenauth v0.0.0-20181001105134-8545f626c189/go.mod h1:UqBF00IfKvd39ni5+yI5MLMjAf4gX7cDKN/26zDOD6c= +github.com/gobuffalo/packd v0.0.0-20181027182251-01ad393492c8/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027190505-aafc0d02c411/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027194105-7ae579e6d213/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181031195726-c82734870264/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181104210303-d376b15f8e96/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= 
+github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181124090624-311c6248e5fb/go.mod h1:Foenia9ZvITEvG05ab6XpiD5EfBHPL8A6hush8SJ0o8= +github.com/gobuffalo/packd v0.0.0-20181207120301-c49825f8f6f4/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= +github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= +github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24= +github.com/gobuffalo/packr v1.15.0/go.mod h1:t5gXzEhIviQwVlNx/+3SfS07GS+cZ2hn76WLzPp6MGI= +github.com/gobuffalo/packr v1.15.1/go.mod h1:IeqicJ7jm8182yrVmNbM6PR4g79SjN9tZLH8KduZZwE= +github.com/gobuffalo/packr v1.19.0/go.mod h1:MstrNkfCQhd5o+Ct4IJ0skWlxN8emOq8DsoT1G98VIU= +github.com/gobuffalo/packr v1.20.0/go.mod h1:JDytk1t2gP+my1ig7iI4NcVaXr886+N0ecUga6884zw= +github.com/gobuffalo/packr v1.21.0/go.mod h1:H00jGfj1qFKxscFJSw8wcL4hpQtPe1PfU2wa6sg/SR0= +github.com/gobuffalo/packr/v2 v2.0.0-rc.8/go.mod h1:y60QCdzwuMwO2R49fdQhsjCPv7tLQFR0ayzxxla9zes= +github.com/gobuffalo/packr/v2 v2.0.0-rc.9/go.mod h1:fQqADRfZpEsgkc7c/K7aMew3n4aF1Kji7+lIZeR98Fc= +github.com/gobuffalo/packr/v2 v2.0.0-rc.10/go.mod h1:4CWWn4I5T3v4c1OsJ55HbHlUEKNWMITG5iIkdr4Px4w= +github.com/gobuffalo/packr/v2 v2.0.0-rc.11/go.mod h1:JoieH/3h3U4UmatmV93QmqyPUdf4wVM9HELaHEu+3fk= +github.com/gobuffalo/packr/v2 v2.0.0-rc.12/go.mod h1:FV1zZTsVFi1DSCboO36Xgs4pzCZBjB/tDV9Cz/lSaR8= +github.com/gobuffalo/packr/v2 v2.0.0-rc.13/go.mod h1:2Mp7GhBFMdJlOK8vGfl7SYtfMP3+5roE39ejlfjw0rA= +github.com/gobuffalo/packr/v2 v2.0.0-rc.14/go.mod h1:06otbrNvDKO1eNQ3b8hst+1010UooI2MFg+B2Ze4MV8= +github.com/gobuffalo/plush v3.7.16+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.20+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.21+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.22+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.23+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.30+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.31+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.32+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plushgen v0.0.0-20181128164830-d29dcb966cb2/go.mod h1:r9QwptTFnuvSaSRjpSp4S2/4e2D3tJhARYbvEBcKSb4= +github.com/gobuffalo/plushgen v0.0.0-20181203163832-9fc4964505c2/go.mod h1:opEdT33AA2HdrIwK1aibqnTJDVVKXC02Bar/GT1YRVs= +github.com/gobuffalo/plushgen v0.0.0-20181207152837-eedb135bd51b/go.mod h1:Lcw7HQbEVm09sAQrCLzIxuhFbB3nAgp4c55E+UlynR0= +github.com/gobuffalo/plushgen v0.0.0-20190104222512-177cd2b872b3/go.mod h1:tYxCozi8X62bpZyKXYHw1ncx2ZtT2nFvG42kuLwYjoc= +github.com/gobuffalo/pop v4.8.2+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/pop v4.8.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/pop 
v4.8.4+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/release v1.0.35/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= +github.com/gobuffalo/release v1.0.38/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= +github.com/gobuffalo/release v1.0.42/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= +github.com/gobuffalo/release v1.0.52/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= +github.com/gobuffalo/release v1.0.53/go.mod h1:FdF257nd8rqhNaqtDWFGhxdJ/Ig4J7VcS3KL7n/a+aA= +github.com/gobuffalo/release v1.0.54/go.mod h1:Pe5/RxRa/BE8whDpGfRqSI7D1a0evGK1T4JDm339tJc= +github.com/gobuffalo/release v1.0.61/go.mod h1:mfIO38ujUNVDlBziIYqXquYfBF+8FDHUjKZgYC1Hj24= +github.com/gobuffalo/release v1.0.72/go.mod h1:NP5NXgg/IX3M5XmHmWR99D687/3Dt9qZtTK/Lbwc1hU= +github.com/gobuffalo/release v1.1.1/go.mod h1:Sluak1Xd6kcp6snkluR1jeXAogdJZpFFRzTYRs/2uwg= +github.com/gobuffalo/release v1.1.3/go.mod h1:CuXc5/m+4zuq8idoDt1l4va0AXAn/OSs08uHOfMVr8E= +github.com/gobuffalo/release v1.1.6/go.mod h1:18naWa3kBsqO0cItXZNJuefCKOENpbbUIqRL1g+p6z0= +github.com/gobuffalo/shoulders v1.0.1/go.mod h1:V33CcVmaQ4gRUmHKwq1fiTXuf8Gp/qjQBUL5tHPmvbA= +github.com/gobuffalo/syncx v0.0.0-20181120191700-98333ab04150/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobuffalo/syncx v0.0.0-20181120194010-558ac7de985f/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobuffalo/tags v2.0.11+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/tags v2.0.14+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/tags v2.0.15+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/uuid v2.0.3+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/uuid v2.0.4+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/uuid v2.0.5+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/validate v2.0.3+incompatible/go.mod h1:N+EtDe0J8252BgfzQUChBgfd6L93m9weay53EWFVsMM= +github.com/gobuffalo/x v0.0.0-20181003152136-452098b06085/go.mod h1:WevpGD+5YOreDJznWevcn8NTmQEW5STSBgIkpkjzqXc= +github.com/gobuffalo/x v0.0.0-20181007152206-913e47c59ca7/go.mod h1:9rDPXaB3kXdKWzMc4odGQQdG2e2DIEmANy5aSJ9yesY= +github.com/gofrs/uuid v3.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff h1:kOkM9whyQYodu09SJ6W3NCsHG7crFaJILQ22Gozp3lg= +github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.0.0-20190221164956-3f3cc5a566b2 h1:UVNu/91ffTOzayxrORCp4UVGrYH05xTq8DuNR7ILW1Q= +github.com/gophercloud/gophercloud v0.0.0-20190221164956-3f3cc5a566b2/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.1.2/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= +github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM= 
+github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= +github.com/joho/godotenv v1.2.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.7.8/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq v1.0.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/markbates/deplist v1.0.4/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= +github.com/markbates/deplist v1.0.5/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= +github.com/markbates/going v1.0.2/go.mod h1:UWCk3zm0UKefHZ7l8BNqi26UyiEMniznk8naLdTcy6c= +github.com/markbates/grift v1.0.4/go.mod h1:wbmtW74veyx+cgfwFhlnnMWqhoz55rnHR47oMXzsyVs= +github.com/markbates/hmax v1.0.0/go.mod h1:cOkR9dktiESxIMu+65oc/r/bdY4bE8zZw3OLhLx0X2c= +github.com/markbates/inflect v1.0.0/go.mod h1:oTeZL2KHA7CUX6X+fovmK9OvIOFuqu0TwdQrZjLTh88= +github.com/markbates/inflect v1.0.1/go.mod h1:uv3UVNBe5qBIfCm8O8Q+DW+S1EopeyINj+Ikhc7rnCk= +github.com/markbates/inflect v1.0.3/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= +github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= +github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= +github.com/markbates/oncer v0.0.0-20180924031910-e862a676800b/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20180924034138-723ad0170a46/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/refresh v1.4.10/go.mod h1:NDPHvotuZmTmesXxr95C9bjlw1/0frJwtME2dzcVKhc= +github.com/markbates/safe v1.0.0/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/markbates/sigtx v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc= +github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 
+github.com/monoculum/formam v0.0.0-20180901015400-4e68be1d79ba/go.mod h1:RKgILGEJq24YyJ2ban8EO0RUVSJlF1pGsEvoLEACr/Q= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.1.0 
h1:IxU7wGikQPAcoOd3/f4Ol7+vIKS1Sgu08tzjktR4nJE= +github.com/prometheus/common v0.1.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rogpeppe/go-internal v1.0.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.1.0 h1:g0fH8RicVgNl+zVZDCDfbdWxAWoAEJyI7I3TZYXFiig= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20170515013102-78fb10f4a5f8/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod 
h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20180602230221-c42b0e3b24d9/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.1.0/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= +github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.0 h1:O9FblXGxoTc51M+cqr74Bm2Tmt4PvkA5iu/j8HrkNuY= +github.com/spf13/afero v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.2.1/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI= +github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI= +go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181024171144-74cb1d3d52f4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181017193950-04a2e542c03f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c h1:pcBdqVcrlT+A3i+tWsOROFONQyey9tisIQHI4xqVGLg= +golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180921163948-d47a0f339242/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180927150500-dad3d9fb7b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181005133103-4497e2df6f9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181011152604-fa43e7bc11ba/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181022134430-8a28ead16f52/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181024145615-5cd93ef61a7c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026064943-731415f00dce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= 
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181003024731-2f84ea8ef872/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181013182035-5e66757b835f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181017214349-06f26fdaaa28/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181024171208-a2dc47679d30/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181026183834-f60e5f99f081/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181107215632-34b416bd17b3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181114190951-94339b83286c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181119130350-139d099f6620/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127195227-b4e97c0ed882/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181203210056-e5f3ab76ea4b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181205224935-3576414c54a4/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181206194817-bcd4e47d0288/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181207183836-8bc39b988060/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181212172921-837e80568c09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190102213336-ca9055ed7d04/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190104182027-498d95493402/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190111214448-fc1d57b08d7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b h1:QhDb4isMLF+1hiAgAqj1YPRPQh+eAqwfG6wJzp57Iuo= +golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190122154452-ba6ebe99b011/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 h1:SdCO7As+ChE1iV3IjBleIIWlj8VjZWuYEUF5pjELOOQ= +google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA= +google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools 
v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20190222213804-5cb15d344471 h1:MzQGt8qWQCR+39kbYRd0uQqsvSidpYqJLFeWiJ9l4OE= +k8s.io/api v0.0.0-20190222213804-5cb15d344471/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 h1:JfFtjaElBIgYKCWEtYQkcNrTpW+lMO4GJy8NP6SVQmM= +k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= +k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 h1:UYfHH+KEF88OTg+GojQUwFTNxbxwmoktLwutUzR0GPg= +k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apiserver v0.0.0-20190122042701-b6aa1175dafa h1:lje7gmtp7V1upmFWKah1sprCvmd/9GvlRZsBlRauCVc= +k8s.io/apiserver v0.0.0-20190122042701-b6aa1175dafa/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= +k8s.io/client-go v10.0.0+incompatible h1:F1IqCqw7oMBzDkqlcBymRq1450wD0eNqLE9jzUrIi34= +k8s.io/client-go v10.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b h1:KH0fUlgdFZH8UMxJ/FDCYHpczfSQKefetq5NjL6BVF0= +k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= +k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d h1:rnHKkkKa8rR4DkemPeIzuL+/Ks2zZj6d3OV/HYD0t/E= +k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d/go.mod h1:lD66vYoxfIYwZTZunUYSg/e6fEj0fcgked2esAmsIfA= +k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= +k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.2 h1:qvP/U6CcZ6qyi/qSHlJKdlAboCzo3mT0DAm0XAarpz4= +k8s.io/klog v0.3.2/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/controller-runtime v0.1.12 h1:ovDq28E64PeY1yR+6H7DthakIC09soiDCrKvfP2tPYo= +sigs.k8s.io/controller-runtime v0.1.12/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8= +sigs.k8s.io/controller-tools v0.1.11 h1:1ln8Ipmg2xosbeHmnlzG53kwO1f3yf9l6auvCHdbZpI= +sigs.k8s.io/controller-tools v0.1.11/go.mod h1:6g08p9m9G/So3sBc1AOQifHfhxH/mb6Sc4z0LMI8XMw= +sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= +sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/sigs.k8s.io/cluster-api/hack/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/hack/BUILD.bazel new file mode 100644 index 
0000000000..d306c1084e --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/hack/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["tools.go"], + importpath = "sigs.k8s.io/cluster-api/hack", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/code-generator/cmd/client-gen:go_default_library", + "//vendor/k8s.io/code-generator/cmd/conversion-gen:go_default_library", + "//vendor/k8s.io/code-generator/cmd/deepcopy-gen:go_default_library", + "//vendor/k8s.io/code-generator/cmd/defaulter-gen:go_default_library", + "//vendor/k8s.io/code-generator/cmd/informer-gen:go_default_library", + "//vendor/k8s.io/code-generator/cmd/lister-gen:go_default_library", + "//vendor/k8s.io/code-generator/cmd/register-gen:go_default_library", + "//vendor/k8s.io/code-generator/cmd/set-gen:go_default_library", + "//vendor/sigs.k8s.io/controller-tools/cmd/controller-gen:go_default_library", + "//vendor/sigs.k8s.io/testing_frameworks/integration:go_default_library", + ], +) diff --git a/vendor/sigs.k8s.io/cluster-api/hack/ensure-go.sh b/vendor/sigs.k8s.io/cluster-api/hack/ensure-go.sh new file mode 100644 index 0000000000..97ebe6055d --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/hack/ensure-go.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +# Ensure the go tool exists and is a viable version. +verify_go_version() { + if [[ -z "$(command -v go)" ]]; then + cat < finalizerCount { if err := r.Update(context.Background(), cluster); err != nil { klog.Infof("Failed to add finalizer to cluster %q: %v", name, err) - return reconcile.Result{}, err + return reconcile.Result{}, errors.WithStack(err) } // Since adding the finalizer updates the object return to avoid later update issues. @@ -118,33 +142,155 @@ func (r *ReconcileCluster) Reconcile(request reconcile.Request) (reconcile.Resul if !cluster.ObjectMeta.DeletionTimestamp.IsZero() { // no-op if finalizer has been removed. 
if !util.Contains(cluster.ObjectMeta.Finalizers, clusterv1.ClusterFinalizer) { - klog.Infof("reconciling cluster object %v causes a no-op as there is no finalizer.", name) + klog.Infof("Reconciling cluster object %v causes a no-op as there is no finalizer.", name) return reconcile.Result{}, nil } - klog.Infof("reconciling cluster object %v triggers delete.", name) + children, err := r.listChildren(context.Background(), cluster) + if err != nil { + klog.Errorf("Failed to list dependent objects of cluster %s/%s: %v", cluster.ObjectMeta.Namespace, cluster.ObjectMeta.Name, err) + return reconcile.Result{}, err + } + + if len(children) > 0 { + klog.Infof("Deleting cluster %s: %d children still exist, will requeue", name, len(children)) + + var errList []error + + for _, child := range children { + accessor, err := meta.Accessor(child) + if err != nil { + klog.Errorf("cluster %s: couldn't create accessor for %T: %v", name, child, err) + continue + } + + if accessor.GetDeletionTimestamp() != nil { + continue + } + + gvk := child.GetObjectKind().GroupVersionKind().String() + + klog.V(4).Infof("Deleting cluster %s: Deleting %s %s", name, gvk, accessor.GetName()) + if err := r.Delete(context.Background(), child, client.PropagationPolicy(metav1.DeletePropagationForeground)); err != nil { + err = errors.Wrapf(err, "deleting cluster %s: failed to delete %s %s", name, gvk, accessor.GetName()) + klog.Errorf("Deletion error: %v", err) + errList = append(errList, err) + } + } + + if len(errList) > 0 { + return reconcile.Result{}, errorag.NewAggregate(errList) + + } + + return reconcile.Result{Requeue: true, RequeueAfter: deleteRequeueAfter}, nil + } + + klog.Infof("Reconciling cluster object %v triggers delete.", name) if err := r.actuator.Delete(cluster); err != nil { klog.Errorf("Error deleting cluster object %v; %v", name, err) - return reconcile.Result{}, err + return reconcile.Result{}, errors.WithStack(err) } + // Remove finalizer on successful deletion. - klog.Infof("cluster object %v deletion successful, removing finalizer.", name) - cluster.ObjectMeta.Finalizers = util.Filter(cluster.ObjectMeta.Finalizers, clusterv1.ClusterFinalizer) - if err := r.Client.Update(context.Background(), cluster); err != nil { + klog.Infof("Cluster object %v deletion successful, removing finalizer.", name) + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + // It's possible the actuator's Delete call modified the cluster. We can't guarantee that every provider + // updated the in memory cluster object with the latest copy of the cluster, so try to get a fresh copy. + // + // Note, because the get from the client is a cached read from the shared informer's cache, there's still a + // chance this could be a stale read. + // + // Note 2, this is not a Patch call because the version of controller-runtime in the release-0.1 branch + // does not support patching. 
+ err := r.Get(context.Background(), request.NamespacedName, cluster) + if err != nil { + return err + } + + cluster.ObjectMeta.Finalizers = util.Filter(cluster.ObjectMeta.Finalizers, clusterv1.ClusterFinalizer) + + return r.Client.Update(context.Background(), cluster) + }) + if err != nil { klog.Errorf("Error removing finalizer from cluster object %v; %v", name, err) - return reconcile.Result{}, err + return reconcile.Result{}, errors.WithStack(err) } + return reconcile.Result{}, nil } - klog.Infof("reconciling cluster object %v triggers idempotent reconcile.", name) + klog.Infof("Reconciling cluster object %v triggers idempotent reconcile.", name) if err := r.actuator.Reconcile(cluster); err != nil { if requeueErr, ok := errors.Cause(err).(controllerError.HasRequeueAfterError); ok { klog.Infof("Actuator returned requeue-after error: %v", requeueErr) return reconcile.Result{Requeue: true, RequeueAfter: requeueErr.GetRequeueAfter()}, nil } klog.Errorf("Error reconciling cluster object %v; %v", name, err) - return reconcile.Result{}, err + return reconcile.Result{}, errors.WithStack(err) } return reconcile.Result{}, nil } + +// listChildren returns a list of Deployments, Sets, and Machines than have an ownerref to the given cluster +func (r *ReconcileCluster) listChildren(ctx context.Context, cluster *clusterv1.Cluster) ([]runtime.Object, error) { + var children []runtime.Object + + ns := cluster.GetNamespace() + opts := metav1.ListOptions{ + LabelSelector: labels.FormatLabels( + map[string]string{clusterv1.MachineClusterLabelName: cluster.GetName()}, + ), + } + + dfunc := func(ctx context.Context, m metav1.ListOptions) (runtime.Object, error) { + return r.clusterClient.MachineDeployments(ns).List(m) + } + sfunc := func(ctx context.Context, m metav1.ListOptions) (runtime.Object, error) { + return r.clusterClient.MachineSets(ns).List(m) + } + mfunc := func(ctx context.Context, m metav1.ListOptions) (runtime.Object, error) { + return r.clusterClient.Machines(ns).List(m) + } + + eachFunc := func(o runtime.Object) error { + + acc, err := meta.Accessor(o) + if err != nil { + klog.Errorf("cluster %s: couldn't create accessor for %T: %v", cluster.Name, o, err) + return nil + } + + if util.PointsTo(acc.GetOwnerReferences(), &cluster.ObjectMeta) { + children = append(children, o) + } + + return nil + } + + deployments, err := pager.New(dfunc).List(ctx, opts) + if err != nil { + return nil, errors.Wrapf(err, "failed to list MachineDeployments in %s/%s", ns, cluster.Name) + } + if err := meta.EachListItem(deployments, eachFunc); err != nil { + return nil, errors.Wrapf(err, "couldn't iterate MachinesDeployments for cluster %s/%s", ns, cluster.Name) + } + + sets, err := pager.New(sfunc).List(ctx, opts) + if err != nil { + return nil, errors.Wrapf(err, "failed to list MachineSets in %s/%s", ns, cluster.Name) + } + if err := meta.EachListItem(sets, eachFunc); err != nil { + return nil, errors.Wrapf(err, "couldn't iterate MachineSets for cluster %s/%s", ns, cluster.Name) + } + + machines, err := pager.New(mfunc).List(ctx, opts) + if err != nil { + return nil, errors.Wrapf(err, "failed to list Machines in %s/%s", ns, cluster.Name) + } + if err := meta.EachListItem(machines, eachFunc); err != nil { + return nil, errors.Wrapf(err, "couldn't iterate Machines for cluster %s/%s", ns, cluster.Name) + } + + return children, nil +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_test.go index 
c6268a634f..dd956e7eeb 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_test.go @@ -55,7 +55,11 @@ func TestReconcile(t *testing.T) { c = mgr.GetClient() a := newTestActuator() - recFn, requests := SetupTestReconcile(newReconciler(mgr, a)) + r, err := newReconciler(mgr, a) + if err != nil { + t.Fatalf("Couldn't create controller: %v", err) + } + recFn, requests := SetupTestReconcile(r) if err := add(mgr, recFn); err != nil { t.Fatalf("error adding controller to manager: %v", err) } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go index 5352e22217..2a7b7bc573 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" controllerError "sigs.k8s.io/cluster-api/pkg/controller/error" "sigs.k8s.io/cluster-api/pkg/controller/remote" @@ -122,7 +123,8 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul } // Set the ownerRef with foreground deletion if there is a linked cluster. - if cluster != nil && len(m.OwnerReferences) == 0 { + if cluster != nil && shouldAdopt(m) { + klog.Infof("Cluster %s/%s is adopting Machine %s", cluster.Namespace, cluster.Name, m.Name) blockOwnerDeletion := true m.OwnerReferences = append(m.OwnerReferences, metav1.OwnerReference{ APIVersion: cluster.APIVersion, @@ -133,28 +135,13 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul }) } - // If object hasn't been deleted and doesn't have a finalizer, add one - // Add a finalizer to newly created objects. 
- if m.ObjectMeta.DeletionTimestamp.IsZero() { - finalizerCount := len(m.Finalizers) - - if cluster != nil && !util.Contains(m.Finalizers, metav1.FinalizerDeleteDependents) { - m.Finalizers = append(m.ObjectMeta.Finalizers, metav1.FinalizerDeleteDependents) - } - - if !util.Contains(m.Finalizers, clusterv1.MachineFinalizer) { - m.Finalizers = append(m.ObjectMeta.Finalizers, clusterv1.MachineFinalizer) - } - - if len(m.Finalizers) > finalizerCount { - if err := r.Client.Update(ctx, m); err != nil { - klog.Infof("Failed to add finalizers to machine %q: %v", name, err) - return reconcile.Result{}, err - } - - // Since adding the finalizer updates the object return to avoid later update issues - return reconcile.Result{Requeue: true}, nil + if reconcileFinalizers(m, cluster) { + if err := r.Client.Update(ctx, m); err != nil { + klog.Infof("Failed to add finalizers to machine %q: %v", name, err) + return reconcile.Result{}, err } + // Since adding the finalizer updates the object return to avoid later update issues + return reconcile.Result{Requeue: true}, nil } if !m.ObjectMeta.DeletionTimestamp.IsZero() { @@ -180,7 +167,17 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul return reconcile.Result{}, err } - if m.Status.NodeRef != nil { + if err := r.isDeleteNodeAllowed(context.Background(), m); err != nil { + switch err { + case errNilNodeRef: + klog.V(2).Infof("Deleting node is not allowed for machine %q: %v", m.Name, err) + case errNoControlPlaneNodes, errLastControlPlaneNode: + klog.V(2).Infof("Deleting node %q is not allowed for machine %q: %v", m.Status.NodeRef.Name, m.Name, err) + default: + klog.Errorf("IsDeleteNodeAllowed check failed for machine %q: %v", name, err) + return reconcile.Result{}, err + } + } else { klog.Infof("Deleting node %q for machine %q", m.Status.NodeRef.Name, m.Name) if err := r.deleteNode(ctx, cluster, m.Status.NodeRef.Name); err != nil && !apierrors.IsNotFound(err) { klog.Errorf("Error deleting node %q for machine %q: %v", m.Status.NodeRef.Name, name, err) @@ -235,6 +232,29 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul return reconcile.Result{}, nil } +// reconcileFinalizers appends any missing finalizers to the machine +// and returns true if the api server needs to be updated. +func reconcileFinalizers(m *clusterv1.Machine, cluster *clusterv1.Cluster) bool { + // If object hasn't been deleted and doesn't have a finalizer, add one + // Add a finalizer to newly created objects. 
+ if m.ObjectMeta.DeletionTimestamp.IsZero() { + finalizerCount := len(m.Finalizers) + + if cluster != nil && !util.Contains(m.Finalizers, metav1.FinalizerDeleteDependents) { + m.Finalizers = append(m.ObjectMeta.Finalizers, metav1.FinalizerDeleteDependents) + } + + if !util.Contains(m.Finalizers, clusterv1.MachineFinalizer) { + m.Finalizers = append(m.ObjectMeta.Finalizers, clusterv1.MachineFinalizer) + } + + if len(m.Finalizers) > finalizerCount { + return true + } + } + return false +} + func (r *ReconcileMachine) getCluster(ctx context.Context, machine *clusterv1.Machine) (*clusterv1.Cluster, error) { if machine.Labels[clusterv1.MachineClusterLabelName] == "" { klog.Infof("Machine %q in namespace %q doesn't specify %q label, assuming nil cluster", machine.Name, machine.Namespace, clusterv1.MachineClusterLabelName) @@ -278,6 +298,44 @@ func (r *ReconcileMachine) isDeleteAllowed(machine *clusterv1.Machine) bool { return node.UID != machine.Status.NodeRef.UID } +var ( + errNilNodeRef = errors.New("noderef is nil") + errLastControlPlaneNode = errors.New("last control plane member") + errNoControlPlaneNodes = errors.New("no control plane members") +) + +// isDeleteNodeAllowed returns nil only if the Machine's NodeRef is not nil +// and if the Machine is not the last control plane node in the cluster. +func (r *ReconcileMachine) isDeleteNodeAllowed(ctx context.Context, machine *clusterv1.Machine) error { + // Cannot delete something that doesn't exist. + if machine.Status.NodeRef == nil { + return errNilNodeRef + } + + // Get all of the machines that belong to this cluster. + machines, err := r.getMachinesInCluster(ctx, machine.Namespace, machine.Labels[clusterv1.MachineClusterLabelName]) + if err != nil { + return err + } + + // Whether or not it is okay to delete the NodeRef depends on the + // number of remaining control plane members and whether or not this + // machine is one of them. + switch numControlPlaneMachines := len(util.GetControlPlaneMachines(machines)); { + case numControlPlaneMachines == 0: + // Do not delete the NodeRef if there are no remaining members of + // the control plane. + return errNoControlPlaneNodes + case numControlPlaneMachines == 1 && util.IsControlPlaneMachine(machine): + // Do not delete the NodeRef if this is the last member of the + // control plane. + return errLastControlPlaneNode + default: + // Otherwise it is okay to delete the NodeRef. + return nil + } +} + func (r *ReconcileMachine) deleteNode(ctx context.Context, cluster *clusterv1.Cluster, name string) error { if cluster == nil { // Try to retrieve the Node from the local cluster, if no Cluster reference is found. 
@@ -305,3 +363,30 @@ func (r *ReconcileMachine) deleteNode(ctx context.Context, cluster *clusterv1.Cl return corev1Remote.Nodes().Delete(name, &metav1.DeleteOptions{}) } + +// getMachinesInCluster returns all of the Machine objects that belong to the +// same cluster as the provided Machine +func (r *ReconcileMachine) getMachinesInCluster(ctx context.Context, namespace, name string) ([]*clusterv1.Machine, error) { + if name == "" { + return nil, nil + } + + machineList := &clusterv1.MachineList{} + labels := map[string]string{clusterv1.MachineClusterLabelName: name} + listOptions := client.InNamespace(namespace).MatchingLabels(labels) + + if err := r.Client.List(ctx, listOptions, machineList); err != nil { + return nil, errors.Wrap(err, "failed to list machines") + } + + machines := make([]*clusterv1.Machine, len(machineList.Items)) + for i := range machineList.Items { + machines[i] = &machineList.Items[i] + } + + return machines, nil +} + +func shouldAdopt(m *v1alpha1.Machine) bool { + return !util.HasOwner(m.OwnerReferences, v1alpha1.SchemeGroupVersion.String(), []string{"MachineSet", "MachineDeployment", "Cluster"}) +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_test.go index 49cf6128ff..cf00795b4c 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_test.go @@ -18,12 +18,14 @@ package machine import ( "reflect" + "sort" "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -191,3 +193,92 @@ func TestReconcileRequest(t *testing.T) { } } } + +func TestReconcileFinalizers(t *testing.T) { + // If we need to add new finalizer logic later, add them here as well. 
+ allFinalizers := []string{metav1.FinalizerDeleteDependents, clusterv1.MachineFinalizer} + nilClusterFinalizers := []string{clusterv1.MachineFinalizer} + + cluster1 := v1alpha1.Cluster{} + + testCases := []struct { + cluster *v1alpha1.Cluster + startingFinalizers []string + deleted bool + expected bool + expectedFinalizers []string + }{ + { + cluster: &cluster1, + startingFinalizers: []string{}, + deleted: false, + expected: true, + expectedFinalizers: allFinalizers, + }, + { + cluster: &cluster1, + startingFinalizers: []string{metav1.FinalizerDeleteDependents}, + deleted: false, + expected: true, + expectedFinalizers: allFinalizers, + }, + { + cluster: &cluster1, + startingFinalizers: []string{clusterv1.MachineFinalizer}, + deleted: false, + expected: true, + expectedFinalizers: allFinalizers, + }, + { + cluster: nil, + startingFinalizers: []string{}, + deleted: false, + expected: true, + expectedFinalizers: nilClusterFinalizers, + }, + { + cluster: nil, + startingFinalizers: []string{clusterv1.MachineFinalizer}, + deleted: false, + expected: false, + expectedFinalizers: nilClusterFinalizers, + }, + { + cluster: &cluster1, + startingFinalizers: []string{}, + deleted: true, + expected: false, + expectedFinalizers: []string{}, + }, + { + cluster: &cluster1, + startingFinalizers: allFinalizers, + deleted: true, + expected: false, + expectedFinalizers: allFinalizers, + }, + } + + for i, tc := range testCases { + m := v1alpha1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: tc.startingFinalizers, + }, + } + if tc.deleted { + time := metav1.Now() + m.ObjectMeta.DeletionTimestamp = &time + } + needUpdate := reconcileFinalizers(&m, tc.cluster) + if needUpdate != tc.expected { + t.Errorf("Case: %d. Expected %v, got: %v", i, tc.expected, needUpdate) + } + // sort for easier comparison: [a, b] != [b, a] for DeepEqual. + sort.Strings(tc.expectedFinalizers) + sort.Strings(m.Finalizers) + + if !reflect.DeepEqual(tc.expectedFinalizers, m.Finalizers) { + t.Errorf("Case: %d. Expected %v finializers, got: %v", i, tc.expectedFinalizers, m.Finalizers) + } + } +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go index 46b2f1747f..a142cdfceb 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go @@ -173,7 +173,8 @@ func (r *ReconcileMachineDeployment) reconcile(ctx context.Context, d *v1alpha1. } // Set the ownerRef with foreground deletion if there is a linked cluster. 
- if cluster != nil && len(d.OwnerReferences) == 0 { + if cluster != nil && shouldAdopt(d) { + klog.Infof("Cluster %s/%s is adopting MachineDeployment %s", cluster.Namespace, cluster.Name, d.Name) blockOwnerDeletion := true d.OwnerReferences = append(d.OwnerReferences, metav1.OwnerReference{ APIVersion: cluster.APIVersion, @@ -412,3 +413,7 @@ func (r *ReconcileMachineDeployment) MachineSetToDeployments(o handler.MapObject return result } + +func shouldAdopt(md *v1alpha1.MachineDeployment) bool { + return !util.HasOwner(md.OwnerReferences, v1alpha1.SchemeGroupVersion.String(), []string{"Cluster"}) +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go index 40bb70876a..d0e82134e2 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" "k8s.io/klog" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" "sigs.k8s.io/cluster-api/pkg/util" "sigs.k8s.io/controller-runtime/pkg/client" @@ -170,7 +171,8 @@ func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *cluster } // Set the ownerRef with foreground deletion if there is a linked cluster. - if cluster != nil && len(machineSet.OwnerReferences) == 0 { + if cluster != nil && shouldAdopt(machineSet) { + klog.Infof("Cluster %s/%s is adopting MachineSet %s", cluster.Namespace, cluster.Name, machineSet.Name) blockOwnerDeletion := true machineSet.OwnerReferences = append(machineSet.OwnerReferences, metav1.OwnerReference{ APIVersion: cluster.APIVersion, @@ -256,6 +258,12 @@ func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *cluster return reconcile.Result{RequeueAfter: time.Duration(updatedMS.Spec.MinReadySeconds) * time.Second}, nil } + // Quickly rereconcile until the nodes become Ready. + if updatedMS.Status.ReadyReplicas != replicas { + klog.V(4).Info("Some nodes are not ready yet, requeuing until they are ready") + return reconcile.Result{RequeueAfter: 15 * time.Second}, nil + } + return reconcile.Result{}, nil } @@ -488,3 +496,7 @@ func (r *ReconcileMachineSet) MachineToMachineSets(o handler.MapObject) []reconc return result } + +func shouldAdopt(ms *v1alpha1.MachineSet) bool { + return !util.HasOwner(ms.OwnerReferences, v1alpha1.SchemeGroupVersion.String(), []string{"MachineDeployment", "Cluster"}) +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/status.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/status.go index 1234ff3e8c..dc475e3ffd 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/status.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/status.go @@ -87,7 +87,7 @@ func (c *ReconcileMachineSet) calculateStatus(ms *v1alpha1.MachineSet, filteredM // updateMachineSetStatus attempts to update the Status.Replicas of the given MachineSet, with a single GET/PUT retry. func updateMachineSetStatus(c client.Client, ms *v1alpha1.MachineSet, newStatus v1alpha1.MachineSetStatus) (*v1alpha1.MachineSet, error) { // This is the steady state. It happens when the MachineSet doesn't have any expectations, since - // we do a periodic relist every 30s. 
If the generations differ but the replicas are + // we do a periodic relist every 10 minutes. If the generations differ but the replicas are // the same, a caller might've resized to the same replica count. if ms.Status.Replicas == newStatus.Replicas && ms.Status.FullyLabeledReplicas == newStatus.FullyLabeledReplicas && diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller.go index 5cce73caa5..ef9b0530bb 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller.go @@ -132,9 +132,14 @@ func (r *ReconcileNodeRef) Reconcile(request reconcile.Request) (reconcile.Resul result, err := r.reconcile(ctx, cluster, machine) if err != nil { + if err == ErrNodeNotFound { + klog.Warningf("Failed to assign NodeRef to Machine %q: cannot find a matching Node in namespace %q, retrying later", machine.Name, machine.Namespace) + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + } + klog.Errorf("Failed to assign NodeRef to Machine %q: %v", request.NamespacedName, err) r.recorder.Event(machine, apicorev1.EventTypeWarning, "FailedSetNodeRef", err.Error()) - return result, err + return reconcile.Result{}, err } klog.Infof("Set Machine's (%q in namespace %q) NodeRef to %q", machine.Name, machine.Namespace, machine.Status.NodeRef.Name) @@ -161,10 +166,6 @@ func (r *ReconcileNodeRef) reconcile(ctx context.Context, cluster *v1alpha1.Clus // Get the Node reference. nodeRef, err := r.getNodeReference(corev1Client, providerID) if err != nil { - if err == ErrNodeNotFound { - klog.Warningf("Cannot find a matching Node for Machine %q in namespace %q, retrying later", machine.Name, machine.Namespace) - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } return reconcile.Result{}, err } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/BUILD.bazel index bd9270ba60..a42c374092 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/BUILD.bazel @@ -22,7 +22,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/providerid.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/providerid.go index a316f28e4b..85b94c7026 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/providerid.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/providerid.go @@ -18,34 +18,51 @@ package noderefutil import ( "errors" - "net/url" - "path" + "regexp" + "strings" ) var ( ErrEmptyProviderID = errors.New("providerID is empty") - ErrInvalidProviderID = errors.New("providerID is not valid") + ErrInvalidProviderID = errors.New("providerID must be of the form :////") ) // ProviderID is a struct representation of a Kubernetes ProviderID. 
// Format: cloudProvider://optional/segments/etc/id type ProviderID struct { - value *url.URL + original string + cloudProvider string + id string } +/* + - must start with at least one non-colon + - followed by :// + - followed by any number of characters + - must end with a non-slash +*/ +var providerIDRegex = regexp.MustCompile("^[^:]+://.*[^/]$") + // NewProviderID parses the input string and returns a new ProviderID. func NewProviderID(id string) (*ProviderID, error) { if id == "" { return nil, ErrEmptyProviderID } - parsed, err := url.Parse(id) - if err != nil { - return nil, err + if !providerIDRegex.MatchString(id) { + return nil, ErrInvalidProviderID } + colonIndex := strings.Index(id, ":") + cloudProvider := id[0:colonIndex] + + lastSlashIndex := strings.LastIndex(id, "/") + instance := id[lastSlashIndex+1:] + res := &ProviderID{ - value: parsed, + original: id, + cloudProvider: cloudProvider, + id: instance, } if !res.Validate() { @@ -57,12 +74,12 @@ func NewProviderID(id string) (*ProviderID, error) { // CloudProvider returns the cloud provider portion of the ProviderID. func (p *ProviderID) CloudProvider() string { - return p.value.Scheme + return p.cloudProvider } // ID returns the identifier portion of the ProviderID. func (p *ProviderID) ID() string { - return path.Base(p.value.Path) + return p.id } // Equals returns true if both the CloudProvider and ID match. @@ -72,12 +89,10 @@ func (p *ProviderID) Equals(o *ProviderID) bool { // String returns the string representation of this object. func (p *ProviderID) String() string { - return p.value.String() + return p.original } // Validate returns true if the provider id is valid. func (p *ProviderID) Validate() bool { - return p.CloudProvider() != "" && - p.ID() != "" && - p.ID() != "/" // path.Base returns "/" if consists only of slashes. + return p.CloudProvider() != "" && p.ID() != "" } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/providerid_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/providerid_test.go index 6cd74be7f9..03a588438b 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/providerid_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderefutil/providerid_test.go @@ -17,56 +17,104 @@ limitations under the License. 
package noderefutil import ( - "strings" "testing" - - "github.com/pkg/errors" ) +const aws = "aws" + func TestNewProviderID(t *testing.T) { - input1 := "aws:////instance-id" - _, err := NewProviderID(input1) - if err != nil { - t.Fatalf("Expected no errors, got %v", err) + tests := []struct { + name string + input string + expectedID string + }{ + { + name: "2 slashes after colon, one segment", + input: "aws://instance-id", + expectedID: "instance-id", + }, + { + name: "more than 2 slashes after colon, one segment", + input: "aws:////instance-id", + expectedID: "instance-id", + }, + { + name: "multiple filled-in segments (aws format)", + input: "aws:///zone/instance-id", + expectedID: "instance-id", + }, + { + name: "multiple filled-in segments", + input: "aws://bar/baz/instance-id", + expectedID: "instance-id", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + id, err := NewProviderID(tc.input) + if err != nil { + t.Fatalf("Expected no errors, got %v", err) + } + + if id.CloudProvider() != aws { + t.Errorf("Unexpected cloud provider: %q", id.CloudProvider()) + } + + if e, a := tc.expectedID, id.ID(); e != a { + t.Errorf("Expected %q, got %q", e, a) + } + }) } + } func TestInvalidProviderID(t *testing.T) { testCases := []struct { + name string input string err error }{ { + name: "empty id", + input: "", + err: ErrEmptyProviderID, + }, + { + name: "only empty segments", input: "aws:///////", err: ErrInvalidProviderID, }, { - input: ":///instance-id", - err: errors.New("missing protocol scheme"), + name: "missing cloud provider", + input: "://instance-id", + err: ErrInvalidProviderID, }, { - input: "///instance-id", + name: "missing cloud provider and colon", + input: "//instance-id", err: ErrInvalidProviderID, }, { + name: "missing cloud provider, colon, one leading slash", input: "/instance-id", err: ErrInvalidProviderID, }, { + name: "just an id", input: "instance-id", err: ErrInvalidProviderID, }, - { - input: "#IAmTheSenate", - err: ErrInvalidProviderID, - }, } for _, test := range testCases { - _, err := NewProviderID(test.input) - if !strings.Contains(err.Error(), test.err.Error()) { - t.Fatalf("Expected error %v, got %v", test.err, err) - } + t.Run(test.name, func(t *testing.T) { + _, err := NewProviderID(test.input) + if test.err != err { + t.Fatalf("Expected error %v, got %v", test.err, err) + } + }) + } } @@ -85,7 +133,7 @@ func TestProviderIDEquals(t *testing.T) { t.Fatalf("Expected valid ID, got %v", parsed1.ID()) } - if parsed1.CloudProvider() != "aws" { + if parsed1.CloudProvider() != aws { t.Fatalf("Expected valid CloudProvider, got %v", parsed1.CloudProvider()) } @@ -103,12 +151,11 @@ func TestProviderIDEquals(t *testing.T) { t.Fatalf("Expected valid ID, got %v", parsed2.ID()) } - if parsed2.CloudProvider() != "aws" { + if parsed2.CloudProvider() != aws { t.Fatalf("Expected valid CloudProvider, got %v", parsed2.CloudProvider()) } if !parsed1.Equals(parsed2) { t.Fatal("Expected ProviderIDs to be equal") } - } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel index eb29fb1085..5d95383326 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel @@ -12,6 +12,7 @@ go_library( "//pkg/apis/cluster/v1alpha1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", @@ -25,4 +26,9 @@ go_test( name = "go_default_test", srcs = ["util_test.go"], embed = [":go_default_library"], + deps = [ + "//pkg/apis/cluster/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + ], ) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go b/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go index 76af756efc..929a35c6e7 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go @@ -30,6 +30,7 @@ import ( v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/yaml" @@ -312,4 +313,32 @@ func decodeClusterV1Kinds(decoder *yaml.YAMLOrJSONDecoder, kind string) ([][]byt } return outs, nil + +} + +// Returns true if any of the owner references point to the given target +func PointsTo(refs []metav1.OwnerReference, target *metav1.ObjectMeta) bool { + for _, ref := range refs { + if ref.UID == target.UID { + return true + } + } + + return false +} + +// HasOwner checks if any of the references in the past list match the given apiVersion and one of the given kinds +func HasOwner(refList []metav1.OwnerReference, apiVersion string, kinds []string) bool { + kMap := make(map[string]bool) + for _, kind := range kinds { + kMap[kind] = true + } + + for _, mr := range refList { + if mr.APIVersion == apiVersion && kMap[mr.Kind] { + return true + } + } + + return false } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go index e662e1d185..f5e53f34d1 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go @@ -20,6 +20,10 @@ import ( "io/ioutil" "os" "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) const validCluster = ` @@ -334,3 +338,135 @@ func createTempFile(contents string) (string, error) { f.WriteString(contents) return f.Name(), nil } + +func TestPointsTo(t *testing.T) { + targetID := "fri3ndsh1p" + + meta := metav1.ObjectMeta{ + UID: types.UID(targetID), + } + + tests := []struct { + name string + refIDs []string + expected bool + }{ + { + name: "empty owner list", + }, + { + name: "single wrong owner ref", + refIDs: []string{"m4g1c"}, + }, + { + name: "single right owner ref", + refIDs: []string{targetID}, + expected: true, + }, + { + name: "multiple wrong refs", + refIDs: []string{"m4g1c", "h4rm0ny"}, + }, + { + name: "multiple refs one right", + refIDs: []string{"m4g1c", targetID}, + expected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + pointer := &metav1.ObjectMeta{} + + for _, ref := range test.refIDs { + pointer.OwnerReferences = append(pointer.OwnerReferences, metav1.OwnerReference{ + UID: types.UID(ref), + }) + } + + result := PointsTo(pointer.OwnerReferences, &meta) + if result != test.expected { + t.Errorf("expected %v, got %v", test.expected, result) + } + }) + } +} + +func TestHasOwner(t *testing.T) { + + tests := []struct { + name string + 
refList []metav1.OwnerReference + expected bool + }{ + { + name: "no ownership", + }, + { + name: "owned by cluster", + refList: []metav1.OwnerReference{ + { + Kind: "Cluster", + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + }, + expected: true, + }, + + { + name: "owned by something else", + refList: []metav1.OwnerReference{ + { + Kind: "Pod", + APIVersion: "v1", + }, + { + Kind: "Deployment", + APIVersion: "apps/v1", + }, + }, + }, + { + name: "owner by a deployment", + refList: []metav1.OwnerReference{ + { + Kind: "MachineDeployment", + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + }, + expected: true, + }, + { + name: "right kind, wrong apiversion", + refList: []metav1.OwnerReference{ + { + Kind: "MachineDeployment", + APIVersion: "wrong/v2", + }, + }, + }, + { + + name: "right apiversion, wrong kind", + refList: []metav1.OwnerReference{ + { + Kind: "Machine", + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := HasOwner( + test.refList, + v1alpha1.SchemeGroupVersion.String(), + []string{"MachineDeployment", "Cluster"}, + ) + if test.expected != result { + t.Errorf("expected hasOwner to be %v, got %v", test.expected, result) + } + }) + } +} diff --git a/vendor/sigs.k8s.io/cluster-api/scripts/ci-is-vendor-in-sync.sh b/vendor/sigs.k8s.io/cluster-api/scripts/ci-is-vendor-in-sync.sh index aa6957f623..644f03f554 100644 --- a/vendor/sigs.k8s.io/cluster-api/scripts/ci-is-vendor-in-sync.sh +++ b/vendor/sigs.k8s.io/cluster-api/scripts/ci-is-vendor-in-sync.sh @@ -19,9 +19,7 @@ set -o nounset set -o pipefail REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +source "${REPO_ROOT}/hack/ensure-go.sh" cd "${REPO_ROOT}" -find vendor -name 'BUILD.bazel' -delete - -go get -u github.com/golang/dep/cmd/dep -dep check +go mod verify diff --git a/vendor/sigs.k8s.io/cluster-api/scripts/ci-test.sh b/vendor/sigs.k8s.io/cluster-api/scripts/ci-test.sh index cff626c9c5..ed69ebbf8f 100644 --- a/vendor/sigs.k8s.io/cluster-api/scripts/ci-test.sh +++ b/vendor/sigs.k8s.io/cluster-api/scripts/ci-test.sh @@ -19,6 +19,7 @@ set -o nounset set -o pipefail REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
+source "${REPO_ROOT}/hack/ensure-go.sh" cd "${REPO_ROOT}" && \ source ./scripts/fetch_ext_bins.sh && \ diff --git a/vendor/sigs.k8s.io/cluster-api/vendor/modules.txt b/vendor/sigs.k8s.io/cluster-api/vendor/modules.txt new file mode 100644 index 0000000000..5e228ec183 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/vendor/modules.txt @@ -0,0 +1,666 @@ +# cloud.google.com/go v0.35.1 +cloud.google.com/go/compute/metadata +# contrib.go.opencensus.io/exporter/ocagent v0.2.0 +contrib.go.opencensus.io/exporter/ocagent +# github.com/Azure/go-autorest v11.5.0+incompatible +github.com/Azure/go-autorest/autorest +github.com/Azure/go-autorest/autorest/adal +github.com/Azure/go-autorest/autorest/azure +github.com/Azure/go-autorest/logger +github.com/Azure/go-autorest/tracing +github.com/Azure/go-autorest/autorest/date +# github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 +github.com/appscode/jsonpatch +# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 +github.com/beorn7/perks/quantile +# github.com/census-instrumentation/opencensus-proto v0.1.0 +github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1 +github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1 +github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1 +github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1 +# github.com/davecgh/go-spew v1.1.1 +github.com/davecgh/go-spew/spew +# github.com/dgrijalva/jwt-go v3.2.0+incompatible +github.com/dgrijalva/jwt-go +# github.com/evanphx/json-patch v4.2.0+incompatible +github.com/evanphx/json-patch +# github.com/ghodss/yaml v1.0.0 +github.com/ghodss/yaml +# github.com/go-logr/logr v0.1.0 +github.com/go-logr/logr +# github.com/go-logr/zapr v0.1.0 +github.com/go-logr/zapr +# github.com/gobuffalo/envy v1.6.12 +github.com/gobuffalo/envy +# github.com/gogo/protobuf v1.2.0 +github.com/gogo/protobuf/proto +github.com/gogo/protobuf/sortkeys +# github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff +github.com/golang/groupcache/lru +# github.com/golang/protobuf v1.2.0 +github.com/golang/protobuf/proto +github.com/golang/protobuf/ptypes/any +github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/timestamp +github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/wrappers +# github.com/google/btree v1.0.0 +github.com/google/btree +# github.com/google/gofuzz v1.0.0 +github.com/google/gofuzz +# github.com/google/uuid v1.1.1 +github.com/google/uuid +# github.com/googleapis/gnostic v0.2.0 +github.com/googleapis/gnostic/OpenAPIv2 +github.com/googleapis/gnostic/compiler +github.com/googleapis/gnostic/extensions +# github.com/gophercloud/gophercloud v0.0.0-20190221164956-3f3cc5a566b2 +github.com/gophercloud/gophercloud +github.com/gophercloud/gophercloud/openstack +github.com/gophercloud/gophercloud/openstack/identity/v2/tokens +github.com/gophercloud/gophercloud/openstack/identity/v3/tokens +github.com/gophercloud/gophercloud/openstack/utils +github.com/gophercloud/gophercloud/openstack/identity/v2/tenants +github.com/gophercloud/gophercloud/pagination +# github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f +github.com/gregjones/httpcache 
+github.com/gregjones/httpcache/diskcache +# github.com/hashicorp/golang-lru v0.5.0 +github.com/hashicorp/golang-lru +github.com/hashicorp/golang-lru/simplelru +# github.com/hpcloud/tail v1.0.0 +github.com/hpcloud/tail +github.com/hpcloud/tail/ratelimiter +github.com/hpcloud/tail/util +github.com/hpcloud/tail/watch +github.com/hpcloud/tail/winfile +# github.com/imdario/mergo v0.3.7 +github.com/imdario/mergo +# github.com/inconshreveable/mousetrap v1.0.0 +github.com/inconshreveable/mousetrap +# github.com/joho/godotenv v1.3.0 +github.com/joho/godotenv +# github.com/json-iterator/go v1.1.6 +github.com/json-iterator/go +# github.com/markbates/inflect v1.0.4 +github.com/markbates/inflect +# github.com/matttproud/golang_protobuf_extensions v1.0.1 +github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd +github.com/modern-go/concurrent +# github.com/modern-go/reflect2 v1.0.1 +github.com/modern-go/reflect2 +# github.com/onsi/ginkgo v1.8.0 +github.com/onsi/ginkgo +github.com/onsi/ginkgo/config +github.com/onsi/ginkgo/internal/codelocation +github.com/onsi/ginkgo/internal/failer +github.com/onsi/ginkgo/internal/remote +github.com/onsi/ginkgo/internal/suite +github.com/onsi/ginkgo/internal/testingtproxy +github.com/onsi/ginkgo/internal/writer +github.com/onsi/ginkgo/reporters +github.com/onsi/ginkgo/reporters/stenographer +github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable +github.com/onsi/ginkgo/types +github.com/onsi/ginkgo/internal/spec_iterator +github.com/onsi/ginkgo/internal/containernode +github.com/onsi/ginkgo/internal/leafnodes +github.com/onsi/ginkgo/internal/spec +github.com/onsi/ginkgo/internal/specrunner +github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty +# github.com/onsi/gomega v1.5.0 +github.com/onsi/gomega +github.com/onsi/gomega/gbytes +github.com/onsi/gomega/gexec +github.com/onsi/gomega/internal/assertion +github.com/onsi/gomega/internal/asyncassertion +github.com/onsi/gomega/internal/testingtsupport +github.com/onsi/gomega/matchers +github.com/onsi/gomega/types +github.com/onsi/gomega/format +github.com/onsi/gomega/internal/oraclematcher +github.com/onsi/gomega/matchers/support/goraph/bipartitegraph +github.com/onsi/gomega/matchers/support/goraph/edge +github.com/onsi/gomega/matchers/support/goraph/node +github.com/onsi/gomega/matchers/support/goraph/util +# github.com/pborman/uuid v1.2.0 +github.com/pborman/uuid +# github.com/peterbourgon/diskv v2.0.1+incompatible +github.com/peterbourgon/diskv +# github.com/pkg/errors v0.8.1 +github.com/pkg/errors +# github.com/prometheus/client_golang v0.9.2 +github.com/prometheus/client_golang/prometheus/promhttp +github.com/prometheus/client_golang/prometheus +github.com/prometheus/client_golang/prometheus/internal +# github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f +github.com/prometheus/client_model/go +# github.com/prometheus/common v0.1.0 +github.com/prometheus/common/expfmt +github.com/prometheus/common/model +github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg +# github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 
+github.com/prometheus/procfs +github.com/prometheus/procfs/nfs +github.com/prometheus/procfs/xfs +github.com/prometheus/procfs/internal/util +# github.com/rogpeppe/go-internal v1.1.0 +github.com/rogpeppe/go-internal/modfile +github.com/rogpeppe/go-internal/module +github.com/rogpeppe/go-internal/semver +# github.com/sergi/go-diff v1.0.0 +github.com/sergi/go-diff/diffmatchpatch +# github.com/spf13/afero v1.2.2 +github.com/spf13/afero +github.com/spf13/afero/mem +# github.com/spf13/cobra v0.0.3 +github.com/spf13/cobra +# github.com/spf13/pflag v1.0.3 +github.com/spf13/pflag +# go.opencensus.io v0.18.0 +go.opencensus.io/plugin/ochttp +go.opencensus.io/plugin/ochttp/propagation/tracecontext +go.opencensus.io/stats/view +go.opencensus.io/trace +go.opencensus.io +go.opencensus.io/trace/tracestate +go.opencensus.io/plugin/ochttp/propagation/b3 +go.opencensus.io/stats +go.opencensus.io/tag +go.opencensus.io/trace/propagation +go.opencensus.io/exemplar +go.opencensus.io/internal/tagencoding +go.opencensus.io/stats/internal +go.opencensus.io/internal +go.opencensus.io/trace/internal +# go.uber.org/atomic v1.3.2 +go.uber.org/atomic +# go.uber.org/multierr v1.1.0 +go.uber.org/multierr +# go.uber.org/zap v1.9.1 +go.uber.org/zap +go.uber.org/zap/buffer +go.uber.org/zap/zapcore +go.uber.org/zap/internal/bufferpool +go.uber.org/zap/internal/color +go.uber.org/zap/internal/exit +# golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 +golang.org/x/crypto/ssh/terminal +# golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 +golang.org/x/net/http2 +golang.org/x/net/context +golang.org/x/net/http/httpguts +golang.org/x/net/http2/hpack +golang.org/x/net/idna +golang.org/x/net/context/ctxhttp +golang.org/x/net/html/charset +golang.org/x/net/html +golang.org/x/net/html/atom +golang.org/x/net/trace +golang.org/x/net/internal/timeseries +# golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c +golang.org/x/oauth2 +golang.org/x/oauth2/google +golang.org/x/oauth2/internal +golang.org/x/oauth2/jws +golang.org/x/oauth2/jwt +# golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 +golang.org/x/sync/semaphore +# golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f +golang.org/x/sys/unix +golang.org/x/sys/windows +# golang.org/x/text v0.3.2 +golang.org/x/text/secure/bidirule +golang.org/x/text/unicode/bidi +golang.org/x/text/unicode/norm +golang.org/x/text/transform +golang.org/x/text/encoding +golang.org/x/text/encoding/charmap +golang.org/x/text/encoding/htmlindex +golang.org/x/text/encoding/internal/identifier +golang.org/x/text/encoding/internal +golang.org/x/text/encoding/japanese +golang.org/x/text/encoding/korean +golang.org/x/text/encoding/simplifiedchinese +golang.org/x/text/encoding/traditionalchinese +golang.org/x/text/encoding/unicode +golang.org/x/text/language +golang.org/x/text/internal/utf8internal +golang.org/x/text/runes +golang.org/x/text/internal/language +golang.org/x/text/internal/language/compact +golang.org/x/text/internal/tag +# golang.org/x/time v0.0.0-20181108054448-85acf8d2951c +golang.org/x/time/rate +# golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b +golang.org/x/tools/imports +golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/packages +golang.org/x/tools/internal/gopathwalk +golang.org/x/tools/internal/module +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/cgo +golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/internal/semver 
+golang.org/x/tools/internal/fastwalk +golang.org/x/tools/go/internal/gcimporter +# google.golang.org/api v0.1.0 +google.golang.org/api/support/bundler +# google.golang.org/appengine v1.4.0 +google.golang.org/appengine +google.golang.org/appengine/urlfetch +google.golang.org/appengine/internal +google.golang.org/appengine/internal/app_identity +google.golang.org/appengine/internal/modules +google.golang.org/appengine/internal/urlfetch +google.golang.org/appengine/internal/base +google.golang.org/appengine/internal/datastore +google.golang.org/appengine/internal/log +google.golang.org/appengine/internal/remote_api +# google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 +google.golang.org/genproto/googleapis/rpc/status +# google.golang.org/grpc v1.18.0 +google.golang.org/grpc +google.golang.org/grpc/balancer +google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/codes +google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials +google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/proto +google.golang.org/grpc/grpclog +google.golang.org/grpc/internal +google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/binarylog +google.golang.org/grpc/internal/channelz +google.golang.org/grpc/internal/envconfig +google.golang.org/grpc/internal/grpcrand +google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/transport +google.golang.org/grpc/keepalive +google.golang.org/grpc/metadata +google.golang.org/grpc/naming +google.golang.org/grpc/peer +google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns +google.golang.org/grpc/resolver/passthrough +google.golang.org/grpc/stats +google.golang.org/grpc/status +google.golang.org/grpc/tap +google.golang.org/grpc/balancer/base +google.golang.org/grpc/credentials/internal +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/internal/syscall +# gopkg.in/fsnotify.v1 v1.4.7 +gopkg.in/fsnotify.v1 +# gopkg.in/inf.v0 v0.9.1 +gopkg.in/inf.v0 +# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 +gopkg.in/tomb.v1 +# gopkg.in/yaml.v2 v2.2.2 +gopkg.in/yaml.v2 +# k8s.io/api v0.0.0-20190222213804-5cb15d344471 +k8s.io/api/autoscaling/v1 +k8s.io/api/core/v1 +k8s.io/api/apps/v1 +k8s.io/api/authentication/v1 +k8s.io/api/policy/v1beta1 +k8s.io/api/admissionregistration/v1alpha1 +k8s.io/api/admissionregistration/v1beta1 +k8s.io/api/apps/v1beta1 +k8s.io/api/apps/v1beta2 +k8s.io/api/auditregistration/v1alpha1 +k8s.io/api/authentication/v1beta1 +k8s.io/api/authorization/v1 +k8s.io/api/authorization/v1beta1 +k8s.io/api/autoscaling/v2beta1 +k8s.io/api/autoscaling/v2beta2 +k8s.io/api/batch/v1 +k8s.io/api/batch/v1beta1 +k8s.io/api/batch/v2alpha1 +k8s.io/api/certificates/v1beta1 +k8s.io/api/coordination/v1beta1 +k8s.io/api/events/v1beta1 +k8s.io/api/extensions/v1beta1 +k8s.io/api/networking/v1 +k8s.io/api/rbac/v1 +k8s.io/api/rbac/v1alpha1 +k8s.io/api/rbac/v1beta1 +k8s.io/api/scheduling/v1alpha1 +k8s.io/api/scheduling/v1beta1 +k8s.io/api/settings/v1alpha1 +k8s.io/api/storage/v1 +k8s.io/api/storage/v1alpha1 +k8s.io/api/storage/v1beta1 +k8s.io/api/admission/v1beta1 +# k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme +# 
k8s.io/apimachinery v0.0.0-20190703205208-4cfb76a8bf76 => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 +k8s.io/apimachinery/pkg/api/errors +k8s.io/apimachinery/pkg/apis/meta/v1 +k8s.io/apimachinery/pkg/util/wait +k8s.io/apimachinery/pkg/util/yaml +k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/runtime +k8s.io/apimachinery/pkg/apis/meta/v1/validation +k8s.io/apimachinery/pkg/labels +k8s.io/apimachinery/pkg/runtime/schema +k8s.io/apimachinery/pkg/util/intstr +k8s.io/apimachinery/pkg/util/validation/field +k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/pkg/api/equality +k8s.io/apimachinery/pkg/util/rand +k8s.io/apimachinery/pkg/util/sets +k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/util/json +k8s.io/apimachinery/pkg/runtime/serializer/streaming +k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/errors +k8s.io/apimachinery/pkg/util/validation +k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/conversion +k8s.io/apimachinery/pkg/fields +k8s.io/apimachinery/pkg/selection +k8s.io/apimachinery/pkg/conversion/queryparams +k8s.io/apimachinery/pkg/util/naming +k8s.io/apimachinery/pkg/version +k8s.io/apimachinery/pkg/util/clock +k8s.io/apimachinery/pkg/runtime/serializer/json +k8s.io/apimachinery/pkg/runtime/serializer/protobuf +k8s.io/apimachinery/pkg/runtime/serializer/recognizer +k8s.io/apimachinery/pkg/runtime/serializer/versioning +k8s.io/apimachinery/pkg/util/strategicpatch +k8s.io/apimachinery/pkg/util/cache +k8s.io/apimachinery/pkg/util/diff +k8s.io/apimachinery/pkg/apis/meta/v1beta1 +k8s.io/apimachinery/third_party/forked/golang/reflect +k8s.io/apimachinery/pkg/util/uuid +k8s.io/apimachinery/pkg/util/framer +k8s.io/apimachinery/pkg/util/mergepatch +k8s.io/apimachinery/third_party/forked/golang/json +k8s.io/apimachinery/pkg/apis/meta/internalversion +# k8s.io/apiserver v0.0.0-20190122042701-b6aa1175dafa +k8s.io/apiserver/pkg/storage/names +# k8s.io/client-go v10.0.0+incompatible +k8s.io/client-go/kubernetes +k8s.io/client-go/rest +k8s.io/client-go/tools/clientcmd +k8s.io/client-go/tools/clientcmd/api +k8s.io/client-go/plugin/pkg/client/auth +k8s.io/client-go/kubernetes/typed/core/v1 +k8s.io/client-go/plugin/pkg/client/auth/gcp +k8s.io/client-go/discovery +k8s.io/client-go/util/flowcontrol +k8s.io/client-go/discovery/fake +k8s.io/client-go/testing +k8s.io/client-go/tools/cache +k8s.io/client-go/tools/leaderelection/resourcelock +k8s.io/client-go/tools/record +k8s.io/client-go/util/integer +k8s.io/client-go/util/retry +k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 +k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 +k8s.io/client-go/kubernetes/typed/apps/v1 +k8s.io/client-go/kubernetes/typed/apps/v1beta1 +k8s.io/client-go/kubernetes/typed/apps/v1beta2 +k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1 +k8s.io/client-go/kubernetes/typed/authentication/v1 +k8s.io/client-go/kubernetes/typed/authentication/v1beta1 +k8s.io/client-go/kubernetes/typed/authorization/v1 +k8s.io/client-go/kubernetes/typed/authorization/v1beta1 +k8s.io/client-go/kubernetes/typed/autoscaling/v1 +k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1 +k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 +k8s.io/client-go/kubernetes/typed/batch/v1 +k8s.io/client-go/kubernetes/typed/batch/v1beta1 +k8s.io/client-go/kubernetes/typed/batch/v2alpha1 
+k8s.io/client-go/kubernetes/typed/certificates/v1beta1 +k8s.io/client-go/kubernetes/typed/coordination/v1beta1 +k8s.io/client-go/kubernetes/typed/events/v1beta1 +k8s.io/client-go/kubernetes/typed/extensions/v1beta1 +k8s.io/client-go/kubernetes/typed/networking/v1 +k8s.io/client-go/kubernetes/typed/policy/v1beta1 +k8s.io/client-go/kubernetes/typed/rbac/v1 +k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 +k8s.io/client-go/kubernetes/typed/rbac/v1beta1 +k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 +k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 +k8s.io/client-go/kubernetes/typed/settings/v1alpha1 +k8s.io/client-go/kubernetes/typed/storage/v1 +k8s.io/client-go/kubernetes/typed/storage/v1alpha1 +k8s.io/client-go/kubernetes/typed/storage/v1beta1 +k8s.io/client-go/pkg/version +k8s.io/client-go/plugin/pkg/client/auth/exec +k8s.io/client-go/rest/watch +k8s.io/client-go/tools/metrics +k8s.io/client-go/transport +k8s.io/client-go/util/cert +k8s.io/client-go/tools/auth +k8s.io/client-go/tools/clientcmd/api/latest +k8s.io/client-go/util/homedir +k8s.io/client-go/plugin/pkg/client/auth/azure +k8s.io/client-go/plugin/pkg/client/auth/oidc +k8s.io/client-go/plugin/pkg/client/auth/openstack +k8s.io/client-go/dynamic +k8s.io/client-go/kubernetes/scheme +k8s.io/client-go/tools/leaderelection +k8s.io/client-go/tools/reference +k8s.io/client-go/util/jsonpath +k8s.io/client-go/tools/pager +k8s.io/client-go/util/buffer +k8s.io/client-go/util/workqueue +k8s.io/client-go/kubernetes/fake +k8s.io/client-go/informers +k8s.io/client-go/pkg/apis/clientauthentication +k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 +k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 +k8s.io/client-go/util/connrotation +k8s.io/client-go/tools/clientcmd/api/v1 +k8s.io/client-go/restmapper +k8s.io/client-go/third_party/forked/golang/template +k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake +k8s.io/client-go/kubernetes/typed/apps/v1/fake +k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake +k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake +k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/authentication/v1/fake +k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake +k8s.io/client-go/kubernetes/typed/authorization/v1/fake +k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake +k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake +k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake +k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake +k8s.io/client-go/kubernetes/typed/batch/v1/fake +k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake +k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake +k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake +k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake +k8s.io/client-go/kubernetes/typed/core/v1/fake +k8s.io/client-go/kubernetes/typed/events/v1beta1/fake +k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake +k8s.io/client-go/kubernetes/typed/networking/v1/fake +k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake +k8s.io/client-go/kubernetes/typed/rbac/v1/fake +k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake +k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake +k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake 
+k8s.io/client-go/kubernetes/typed/storage/v1/fake +k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake +k8s.io/client-go/informers/admissionregistration +k8s.io/client-go/informers/apps +k8s.io/client-go/informers/auditregistration +k8s.io/client-go/informers/autoscaling +k8s.io/client-go/informers/batch +k8s.io/client-go/informers/certificates +k8s.io/client-go/informers/coordination +k8s.io/client-go/informers/core +k8s.io/client-go/informers/events +k8s.io/client-go/informers/extensions +k8s.io/client-go/informers/internalinterfaces +k8s.io/client-go/informers/networking +k8s.io/client-go/informers/policy +k8s.io/client-go/informers/rbac +k8s.io/client-go/informers/scheduling +k8s.io/client-go/informers/settings +k8s.io/client-go/informers/storage +k8s.io/client-go/informers/admissionregistration/v1alpha1 +k8s.io/client-go/informers/admissionregistration/v1beta1 +k8s.io/client-go/informers/apps/v1 +k8s.io/client-go/informers/apps/v1beta1 +k8s.io/client-go/informers/apps/v1beta2 +k8s.io/client-go/informers/auditregistration/v1alpha1 +k8s.io/client-go/informers/autoscaling/v1 +k8s.io/client-go/informers/autoscaling/v2beta1 +k8s.io/client-go/informers/autoscaling/v2beta2 +k8s.io/client-go/informers/batch/v1 +k8s.io/client-go/informers/batch/v1beta1 +k8s.io/client-go/informers/batch/v2alpha1 +k8s.io/client-go/informers/certificates/v1beta1 +k8s.io/client-go/informers/coordination/v1beta1 +k8s.io/client-go/informers/core/v1 +k8s.io/client-go/informers/events/v1beta1 +k8s.io/client-go/informers/extensions/v1beta1 +k8s.io/client-go/informers/networking/v1 +k8s.io/client-go/informers/policy/v1beta1 +k8s.io/client-go/informers/rbac/v1 +k8s.io/client-go/informers/rbac/v1alpha1 +k8s.io/client-go/informers/rbac/v1beta1 +k8s.io/client-go/informers/scheduling/v1alpha1 +k8s.io/client-go/informers/scheduling/v1beta1 +k8s.io/client-go/informers/settings/v1alpha1 +k8s.io/client-go/informers/storage/v1 +k8s.io/client-go/informers/storage/v1alpha1 +k8s.io/client-go/informers/storage/v1beta1 +k8s.io/client-go/listers/admissionregistration/v1alpha1 +k8s.io/client-go/listers/admissionregistration/v1beta1 +k8s.io/client-go/listers/apps/v1 +k8s.io/client-go/listers/apps/v1beta1 +k8s.io/client-go/listers/apps/v1beta2 +k8s.io/client-go/listers/auditregistration/v1alpha1 +k8s.io/client-go/listers/autoscaling/v1 +k8s.io/client-go/listers/autoscaling/v2beta1 +k8s.io/client-go/listers/autoscaling/v2beta2 +k8s.io/client-go/listers/batch/v1 +k8s.io/client-go/listers/batch/v1beta1 +k8s.io/client-go/listers/batch/v2alpha1 +k8s.io/client-go/listers/certificates/v1beta1 +k8s.io/client-go/listers/coordination/v1beta1 +k8s.io/client-go/listers/core/v1 +k8s.io/client-go/listers/events/v1beta1 +k8s.io/client-go/listers/extensions/v1beta1 +k8s.io/client-go/listers/networking/v1 +k8s.io/client-go/listers/policy/v1beta1 +k8s.io/client-go/listers/rbac/v1 +k8s.io/client-go/listers/rbac/v1alpha1 +k8s.io/client-go/listers/rbac/v1beta1 +k8s.io/client-go/listers/scheduling/v1alpha1 +k8s.io/client-go/listers/scheduling/v1beta1 +k8s.io/client-go/listers/settings/v1alpha1 +k8s.io/client-go/listers/storage/v1 +k8s.io/client-go/listers/storage/v1alpha1 +k8s.io/client-go/listers/storage/v1beta1 +# k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b +k8s.io/code-generator/cmd/client-gen +k8s.io/code-generator/cmd/conversion-gen +k8s.io/code-generator/cmd/deepcopy-gen +k8s.io/code-generator/cmd/defaulter-gen +k8s.io/code-generator/cmd/informer-gen 
+k8s.io/code-generator/cmd/lister-gen +k8s.io/code-generator/cmd/register-gen +k8s.io/code-generator/cmd/set-gen +k8s.io/code-generator/cmd/client-gen/args +k8s.io/code-generator/cmd/client-gen/generators +k8s.io/code-generator/pkg/util +k8s.io/code-generator/cmd/conversion-gen/args +k8s.io/code-generator/cmd/conversion-gen/generators +k8s.io/code-generator/cmd/deepcopy-gen/args +k8s.io/code-generator/cmd/defaulter-gen/args +k8s.io/code-generator/cmd/informer-gen/args +k8s.io/code-generator/cmd/informer-gen/generators +k8s.io/code-generator/cmd/lister-gen/args +k8s.io/code-generator/cmd/lister-gen/generators +k8s.io/code-generator/cmd/register-gen/args +k8s.io/code-generator/cmd/register-gen/generators +k8s.io/code-generator/cmd/client-gen/types +k8s.io/code-generator/cmd/client-gen/generators/fake +k8s.io/code-generator/cmd/client-gen/generators/scheme +k8s.io/code-generator/cmd/client-gen/generators/util +k8s.io/code-generator/cmd/client-gen/path +# k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d +k8s.io/component-base/cli/flag +# k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af +k8s.io/gengo/args +k8s.io/gengo/examples/deepcopy-gen/generators +k8s.io/gengo/examples/defaulter-gen/generators +k8s.io/gengo/examples/set-gen/generators +k8s.io/gengo/generator +k8s.io/gengo/namer +k8s.io/gengo/types +k8s.io/gengo/parser +k8s.io/gengo/examples/set-gen/sets +# k8s.io/klog v0.3.2 +k8s.io/klog +k8s.io/klog/klogr +# k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 +k8s.io/kube-openapi/pkg/util/proto +# sigs.k8s.io/controller-runtime v0.1.12 +sigs.k8s.io/controller-runtime/pkg/client +sigs.k8s.io/controller-runtime/pkg/client/config +sigs.k8s.io/controller-runtime/pkg/manager +sigs.k8s.io/controller-runtime/pkg/runtime/signals +sigs.k8s.io/controller-runtime/pkg/runtime/log +sigs.k8s.io/controller-runtime/pkg/runtime/scheme +sigs.k8s.io/controller-runtime/pkg/controller +sigs.k8s.io/controller-runtime/pkg/handler +sigs.k8s.io/controller-runtime/pkg/reconcile +sigs.k8s.io/controller-runtime/pkg/source +sigs.k8s.io/controller-runtime/pkg/client/apiutil +sigs.k8s.io/controller-runtime/pkg/cache +sigs.k8s.io/controller-runtime/pkg/internal/recorder +sigs.k8s.io/controller-runtime/pkg/leaderelection +sigs.k8s.io/controller-runtime/pkg/metrics +sigs.k8s.io/controller-runtime/pkg/recorder +sigs.k8s.io/controller-runtime/pkg/runtime/inject +sigs.k8s.io/controller-runtime/pkg/webhook/admission +sigs.k8s.io/controller-runtime/pkg/webhook/admission/types +sigs.k8s.io/controller-runtime/pkg/envtest +sigs.k8s.io/controller-runtime/pkg/internal/controller +sigs.k8s.io/controller-runtime/pkg/predicate +sigs.k8s.io/controller-runtime/pkg/event +sigs.k8s.io/controller-runtime/pkg/source/internal +sigs.k8s.io/controller-runtime/pkg/client/fake +sigs.k8s.io/controller-runtime/pkg/cache/internal +sigs.k8s.io/controller-runtime/pkg/patch +sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics +sigs.k8s.io/controller-runtime/pkg/webhook/types +sigs.k8s.io/controller-runtime/pkg/envtest/printer +sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics +sigs.k8s.io/controller-runtime/pkg/internal/objectutil +# sigs.k8s.io/controller-tools v0.1.11 +sigs.k8s.io/controller-tools/cmd/controller-gen +sigs.k8s.io/controller-tools/pkg/crd/generator +sigs.k8s.io/controller-tools/pkg/rbac +sigs.k8s.io/controller-tools/pkg/webhook +sigs.k8s.io/controller-tools/pkg/crd/util +sigs.k8s.io/controller-tools/pkg/internal/codegen +sigs.k8s.io/controller-tools/pkg/internal/codegen/parse 
+sigs.k8s.io/controller-tools/pkg/util +sigs.k8s.io/controller-tools/pkg/internal/general +# sigs.k8s.io/testing_frameworks v0.1.1 +sigs.k8s.io/testing_frameworks/integration +sigs.k8s.io/testing_frameworks/integration/addr +sigs.k8s.io/testing_frameworks/integration/internal +# sigs.k8s.io/yaml v1.1.0 +sigs.k8s.io/yaml diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go index a08cf751b5..d1e8c3fc03 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go @@ -21,7 +21,8 @@ import ( "log" "strings" - "github.com/markbates/inflect" + "github.com/gobuffalo/flect" + "k8s.io/gengo/types" "sigs.k8s.io/controller-tools/pkg/internal/codegen" "sigs.k8s.io/controller-tools/pkg/internal/general" @@ -71,8 +72,7 @@ func (b *APIs) parseIndex() { // TODO: revisit the part... if r.Resource == "" { - rs := inflect.NewDefaultRuleset() - r.Resource = rs.Pluralize(strings.ToLower(r.Kind)) + r.Resource = flect.Pluralize(strings.ToLower(r.Kind)) } rt, err := parseResourceAnnotation(c) if err != nil {
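
For context, here is a minimal standalone sketch (not part of the diff) of what the replacement flect.Pluralize call in parseIndex does: it derives the resource name by pluralizing the lower-cased kind, which is what the removed inflect.NewDefaultRuleset().Pluralize call produced. The kind names below are only illustrative.

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/gobuffalo/flect"
    )

    func main() {
    	// Mirrors the updated parseIndex logic: resource = pluralized, lower-cased kind.
    	for _, kind := range []string{"Cluster", "Machine", "MachineDeployment"} {
    		fmt.Println(flect.Pluralize(strings.ToLower(kind)))
    		// -> clusters, machines, machinedeployments
    	}
    }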
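
The vendored util.go earlier in this diff gains two owner-reference helpers, PointsTo and HasOwner. The following is a minimal sketch (not part of the diff) of how a caller might use them; the cluster and machine metadata below are hypothetical.

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/types"
    	"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
    	"sigs.k8s.io/cluster-api/pkg/util"
    )

    func main() {
    	// Hypothetical cluster metadata; only the UID matters to PointsTo.
    	cluster := metav1.ObjectMeta{Name: "demo-cluster", UID: types.UID("1234-5678")}

    	// Hypothetical machine carrying an owner reference back to the cluster.
    	machine := metav1.ObjectMeta{
    		OwnerReferences: []metav1.OwnerReference{{
    			APIVersion: v1alpha1.SchemeGroupVersion.String(),
    			Kind:       "Cluster",
    			Name:       cluster.Name,
    			UID:        cluster.UID,
    		}},
    	}

    	// true: one of the machine's owner references carries the cluster's UID.
    	fmt.Println(util.PointsTo(machine.OwnerReferences, &cluster))

    	// true: the machine is owned by a cluster.k8s.io/v1alpha1 Cluster or MachineDeployment.
    	fmt.Println(util.HasOwner(machine.OwnerReferences,
    		v1alpha1.SchemeGroupVersion.String(),
    		[]string{"Cluster", "MachineDeployment"}))
    }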