From 377c5ac3befbb256408519ba34335bc80a2d7392 Mon Sep 17 00:00:00 2001
From: a1012112796 <1012112796@qq.com>
Date: Sun, 7 Mar 2021 22:40:47 +0800
Subject: [PATCH 1/8] add token service support for docker registry

- repo name rule: `{owner}/{repo}[/{image}]` (note: if `{image}` is not
  set, `{repo}` is used as the default image name)
- add a new repo permission unit named `package`; it is not a real unit
  in Gitea, it is only used for permission checks.
- add a small API, `GET /repos/{owner}/{repo}/packages/{type}/{name}`,
  for testing; more endpoints can be added later

ref:
- https://docs.docker.com/registry/spec/auth/token/
- https://docs.docker.com/registry/notifications/
- https://github.com/cesanta/docker_auth

Signed-off-by: a1012112796 <1012112796@qq.com>
---
 custom/conf/app.example.ini | 33 +
 go.mod | 5 +
 go.sum | 16 +
 models/error.go | 19 +
 models/migrations/migrations.go | 2 +
 models/migrations/v175.go | 28 +
 models/models.go | 1 +
 models/package.go | 102 +++
 models/repo_unit.go | 2 +-
 models/unit.go | 14 +
 modules/context/repo.go | 1 +
 modules/convert/package.go | 24 +
 modules/forms/repo_form.go | 1 +
 modules/setting/package.go | 84 +++
 modules/setting/setting.go | 1 +
 modules/structs/package.go | 21 +
 options/locale/locale_en-US.ini | 3 +
 routers/api/v1/api.go | 3 +
 routers/api/v1/repo/package.go | 74 ++
 routers/api/v1/swagger/repo.go | 14 +
 routers/repo/package.go | 421 +++++++++
 routers/repo/setting.go | 9 +
 routers/routes/web.go | 2 +
 templates/repo/settings/options.tmpl | 14 +
 templates/swagger/v1_json.tmpl | 100 +++
 .../github.com/docker/distribution/.gitignore | 38 +
 .../docker/distribution/.gometalinter.json | 16 +
 .../github.com/docker/distribution/.mailmap | 32 +
 .../docker/distribution/.travis.yml | 51 ++
 .../docker/distribution/BUILDING.md | 117 +++
 .../docker/distribution/CONTRIBUTING.md | 148 ++++
 .../github.com/docker/distribution/Dockerfile | 23 +
 vendor/github.com/docker/distribution/LICENSE | 202 ++++++
 .../docker/distribution/MAINTAINERS | 243 +++++++
 .../github.com/docker/distribution/Makefile | 102 +++
 .../github.com/docker/distribution/README.md | 130 ++++
 .../github.com/docker/distribution/ROADMAP.md | 267 +++++++
 .../github.com/docker/distribution/blobs.go | 265 +++++++
 .../configuration/configuration.go | 684 ++++++++++++++++++
 .../distribution/configuration/parser.go | 283 ++++++++
 .../docker/distribution/context/context.go | 73 ++
 .../docker/distribution/context/doc.go | 88 +++
 .../docker/distribution/context/http.go | 337 +++++++++
 .../docker/distribution/context/logger.go | 121 ++++
 .../docker/distribution/context/trace.go | 105 +++
 .../docker/distribution/context/util.go | 25 +
 .../docker/distribution/context/version.go | 22 +
 .../docker/distribution/digestset/set.go | 247 +++++++
 vendor/github.com/docker/distribution/doc.go | 7 +
 .../github.com/docker/distribution/errors.go | 119 +++
 .../docker/distribution/manifests.go | 125 ++++
 .../distribution/notifications/bridge.go | 234 ++++++
 .../distribution/notifications/endpoint.go | 97 +++
 .../distribution/notifications/event.go | 163 +++++
 .../docker/distribution/notifications/http.go | 150 ++++
 .../distribution/notifications/listener.go | 263 +++++++
 .../distribution/notifications/metrics.go | 152 ++++
 .../distribution/notifications/sinks.go | 392 ++++++++++
 .../docker/distribution/reference/helpers.go | 42 ++
 .../distribution/reference/normalize.go | 170 +++++
 .../distribution/reference/reference.go | 433 +++++++++++
 .../docker/distribution/reference/regexp.go | 143 ++++
 .../docker/distribution/registry.go | 118 +++
 .../docker/distribution/registry/auth/auth.go | 201 +++
 .../registry/auth/token/accesscontroller.go | 293 ++++++
 .../registry/auth/token/stringset.go | 35 +
 .../distribution/registry/auth/token/token.go | 378 ++++++++++
 .../distribution/registry/auth/token/util.go | 58 ++
 vendor/github.com/docker/distribution/tags.go | 27 +
 .../docker/distribution/uuid/uuid.go | 126 ++++
 .../docker/distribution/vendor.conf | 51 ++
 .../docker/libtrust/CONTRIBUTING.md | 13 +
 vendor/github.com/docker/libtrust/LICENSE | 191 +++
 vendor/github.com/docker/libtrust/MAINTAINERS | 3 +
 vendor/github.com/docker/libtrust/README.md | 22 +
 .../docker/libtrust/certificates.go | 175 +++
 vendor/github.com/docker/libtrust/doc.go | 9 +
 vendor/github.com/docker/libtrust/ec_key.go | 428 +++++++++++
 vendor/github.com/docker/libtrust/filter.go | 50 ++
 vendor/github.com/docker/libtrust/hash.go | 56 ++
 vendor/github.com/docker/libtrust/jsonsign.go | 657 +++++++++++++++++
 vendor/github.com/docker/libtrust/key.go | 253 +++++++
 .../github.com/docker/libtrust/key_files.go | 255 +++++++
 .../github.com/docker/libtrust/key_manager.go | 175 +++++
 vendor/github.com/docker/libtrust/rsa_key.go | 427 +++++++++++
 vendor/github.com/docker/libtrust/util.go | 363 ++++++++++
 .../go-windows-terminal-sequences/LICENSE | 9 +
 .../go-windows-terminal-sequences/README.md | 42 ++
 .../go-windows-terminal-sequences/go.mod | 1 +
 .../sequences.go | 35 +
 .../sequences_dummy.go | 11 +
 .../opencontainers/go-digest/.mailmap | 4 +
 .../opencontainers/go-digest/.pullapprove.yml | 28 +
 .../opencontainers/go-digest/.travis.yml | 5 +
 .../opencontainers/go-digest/CONTRIBUTING.md | 72 ++
 .../opencontainers/go-digest/LICENSE | 192 +++
 .../opencontainers/go-digest/LICENSE.docs | 425 +++++++++++
 .../opencontainers/go-digest/MAINTAINERS | 5 +
 .../opencontainers/go-digest/README.md | 96 +++
 .../opencontainers/go-digest/algorithm.go | 193 +++
 .../opencontainers/go-digest/digest.go | 157 ++++
 .../opencontainers/go-digest/digester.go | 40 +
 .../opencontainers/go-digest/doc.go | 62 ++
 .../opencontainers/go-digest/go.mod | 3 +
 .../opencontainers/go-digest/verifiers.go | 46 ++
 .../opencontainers/image-spec/LICENSE | 191 +++
 .../image-spec/specs-go/v1/annotations.go | 56 ++
 .../image-spec/specs-go/v1/config.go | 103 +++
 .../image-spec/specs-go/v1/descriptor.go | 64 ++
 .../image-spec/specs-go/v1/index.go | 29 +
 .../image-spec/specs-go/v1/layout.go | 28 +
 .../image-spec/specs-go/v1/manifest.go | 32 +
 .../image-spec/specs-go/v1/mediatype.go | 48 ++
 .../image-spec/specs-go/version.go | 32 +
 .../image-spec/specs-go/versioned.go | 23 +
 vendor/github.com/sirupsen/logrus/.gitignore | 2 +
 .../github.com/sirupsen/logrus/.golangci.yml | 40 +
 vendor/github.com/sirupsen/logrus/.travis.yml | 17 +
 .../github.com/sirupsen/logrus/CHANGELOG.md | 223 ++++++
 vendor/github.com/sirupsen/logrus/LICENSE | 21 +
 vendor/github.com/sirupsen/logrus/README.md | 513 +++++++++++++
 vendor/github.com/sirupsen/logrus/alt_exit.go | 76 ++
 .../github.com/sirupsen/logrus/appveyor.yml | 14 +
 vendor/github.com/sirupsen/logrus/doc.go | 26 +
 vendor/github.com/sirupsen/logrus/entry.go | 426 +++++++++++
 vendor/github.com/sirupsen/logrus/exported.go | 225 ++++++
 .../github.com/sirupsen/logrus/formatter.go | 78 ++
 vendor/github.com/sirupsen/logrus/go.mod | 11 +
 vendor/github.com/sirupsen/logrus/go.sum | 12 +
 vendor/github.com/sirupsen/logrus/hooks.go | 34 +
 .../sirupsen/logrus/json_formatter.go | 125 ++++
 vendor/github.com/sirupsen/logrus/logger.go | 352 +++++++++
 vendor/github.com/sirupsen/logrus/logrus.go | 186 +++++
 .../logrus/terminal_check_appengine.go | 11 +
 .../sirupsen/logrus/terminal_check_bsd.go | 13 +
 .../sirupsen/logrus/terminal_check_js.go | 7 +
 .../logrus/terminal_check_no_terminal.go | 11 +
 .../logrus/terminal_check_notappengine.go | 17 +
 .../sirupsen/logrus/terminal_check_solaris.go | 11 +
 .../sirupsen/logrus/terminal_check_unix.go | 13 +
 .../sirupsen/logrus/terminal_check_windows.go | 34 +
 .../sirupsen/logrus/text_formatter.go | 334 +++++++++
 vendor/github.com/sirupsen/logrus/writer.go | 70 ++
 vendor/modules.txt | 27 +
 144 files changed, 16427 insertions(+), 1 deletion(-)
 create mode 100644 models/migrations/v175.go
 create mode 100644 models/package.go
 create mode 100644 modules/convert/package.go
 create mode 100644 modules/setting/package.go
 create mode 100644 modules/structs/package.go
 create mode 100644 routers/api/v1/repo/package.go
 create mode 100644 routers/repo/package.go
 create mode 100644 vendor/github.com/docker/distribution/.gitignore
 create mode 100644 vendor/github.com/docker/distribution/.gometalinter.json
 create mode 100644 vendor/github.com/docker/distribution/.mailmap
 create mode 100644 vendor/github.com/docker/distribution/.travis.yml
 create mode 100644 vendor/github.com/docker/distribution/BUILDING.md
 create mode 100644 vendor/github.com/docker/distribution/CONTRIBUTING.md
 create mode 100644 vendor/github.com/docker/distribution/Dockerfile
 create mode 100644 vendor/github.com/docker/distribution/LICENSE
 create mode 100644 vendor/github.com/docker/distribution/MAINTAINERS
 create mode 100644 vendor/github.com/docker/distribution/Makefile
 create mode 100644 vendor/github.com/docker/distribution/README.md
 create mode 100644 vendor/github.com/docker/distribution/ROADMAP.md
 create mode 100644 vendor/github.com/docker/distribution/blobs.go
 create mode 100644 vendor/github.com/docker/distribution/configuration/configuration.go
 create mode 100644 vendor/github.com/docker/distribution/configuration/parser.go
 create mode 100644 vendor/github.com/docker/distribution/context/context.go
 create mode 100644 vendor/github.com/docker/distribution/context/doc.go
 create mode 100644 vendor/github.com/docker/distribution/context/http.go
 create mode 100644 vendor/github.com/docker/distribution/context/logger.go
 create mode 100644 vendor/github.com/docker/distribution/context/trace.go
 create mode 100644 vendor/github.com/docker/distribution/context/util.go
 create mode 100644 vendor/github.com/docker/distribution/context/version.go
 create mode 100644 vendor/github.com/docker/distribution/digestset/set.go
 create mode 100644 vendor/github.com/docker/distribution/doc.go
 create mode 100644 vendor/github.com/docker/distribution/errors.go
 create mode 100644 vendor/github.com/docker/distribution/manifests.go
 create mode 100644 vendor/github.com/docker/distribution/notifications/bridge.go
 create mode 100644 vendor/github.com/docker/distribution/notifications/endpoint.go
 create mode 100644 vendor/github.com/docker/distribution/notifications/event.go
 create mode 100644 vendor/github.com/docker/distribution/notifications/http.go
 create mode 100644 vendor/github.com/docker/distribution/notifications/listener.go
 create mode 100644 vendor/github.com/docker/distribution/notifications/metrics.go
 create mode 100644 vendor/github.com/docker/distribution/notifications/sinks.go
 create mode 100644 vendor/github.com/docker/distribution/reference/helpers.go
 create mode 100644 vendor/github.com/docker/distribution/reference/normalize.go
 create mode 100644 vendor/github.com/docker/distribution/reference/reference.go
 create mode 100644 vendor/github.com/docker/distribution/reference/regexp.go
 create mode 100644 vendor/github.com/docker/distribution/registry.go
 create mode 100644 vendor/github.com/docker/distribution/registry/auth/auth.go
 create mode 100644 vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go
 create mode 100644 vendor/github.com/docker/distribution/registry/auth/token/stringset.go
 create mode 100644 vendor/github.com/docker/distribution/registry/auth/token/token.go
 create mode 100644 vendor/github.com/docker/distribution/registry/auth/token/util.go
 create mode 100644 vendor/github.com/docker/distribution/tags.go
 create mode 100644 vendor/github.com/docker/distribution/uuid/uuid.go
 create mode 100644 vendor/github.com/docker/distribution/vendor.conf
 create mode 100644 vendor/github.com/docker/libtrust/CONTRIBUTING.md
 create mode 100644 vendor/github.com/docker/libtrust/LICENSE
 create mode 100644 vendor/github.com/docker/libtrust/MAINTAINERS
 create mode 100644 vendor/github.com/docker/libtrust/README.md
 create mode 100644 vendor/github.com/docker/libtrust/certificates.go
 create mode 100644 vendor/github.com/docker/libtrust/doc.go
 create mode 100644 vendor/github.com/docker/libtrust/ec_key.go
 create mode 100644 vendor/github.com/docker/libtrust/filter.go
 create mode 100644 vendor/github.com/docker/libtrust/hash.go
 create mode 100644 vendor/github.com/docker/libtrust/jsonsign.go
 create mode 100644 vendor/github.com/docker/libtrust/key.go
 create mode 100644 vendor/github.com/docker/libtrust/key_files.go
 create mode 100644 vendor/github.com/docker/libtrust/key_manager.go
 create mode 100644 vendor/github.com/docker/libtrust/rsa_key.go
 create mode 100644 vendor/github.com/docker/libtrust/util.go
 create mode 100644 vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
 create mode 100644 vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
 create mode 100644 vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
 create mode 100644 vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
 create mode 100644 vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
 create mode 100644 vendor/github.com/opencontainers/go-digest/.mailmap
 create mode 100644 vendor/github.com/opencontainers/go-digest/.pullapprove.yml
 create mode 100644 vendor/github.com/opencontainers/go-digest/.travis.yml
 create mode 100644 vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
 create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE
 create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.docs
 create mode 100644 vendor/github.com/opencontainers/go-digest/MAINTAINERS
 create mode 100644 vendor/github.com/opencontainers/go-digest/README.md
 create mode 100644 vendor/github.com/opencontainers/go-digest/algorithm.go
 create mode 100644 vendor/github.com/opencontainers/go-digest/digest.go
 create mode 100644 vendor/github.com/opencontainers/go-digest/digester.go
 create mode 100644 vendor/github.com/opencontainers/go-digest/doc.go
 create mode 100644 vendor/github.com/opencontainers/go-digest/go.mod
 create mode 100644 vendor/github.com/opencontainers/go-digest/verifiers.go
 create mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE
 create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
 create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
 create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
 create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go
 create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go
 create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
 create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
 create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/version.go
 create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
 create mode 100644 vendor/github.com/sirupsen/logrus/.gitignore
 create mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml
 create mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml
 create mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md
 create mode 100644 vendor/github.com/sirupsen/logrus/LICENSE
 create mode 100644 vendor/github.com/sirupsen/logrus/README.md
 create mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go
 create mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml
 create mode 100644 vendor/github.com/sirupsen/logrus/doc.go
 create mode 100644 vendor/github.com/sirupsen/logrus/entry.go
 create mode 100644 vendor/github.com/sirupsen/logrus/exported.go
 create mode 100644 vendor/github.com/sirupsen/logrus/formatter.go
 create mode 100644 vendor/github.com/sirupsen/logrus/go.mod
 create mode 100644 vendor/github.com/sirupsen/logrus/go.sum
 create mode 100644 vendor/github.com/sirupsen/logrus/hooks.go
 create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go
 create mode 100644 vendor/github.com/sirupsen/logrus/logger.go
 create mode 100644 vendor/github.com/sirupsen/logrus/logrus.go
 create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
 create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
 create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go
 create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
 create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
 create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
 create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go
 create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go
 create mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go
 create mode 100644 vendor/github.com/sirupsen/logrus/writer.go

diff --git a/custom/conf/app.example.ini b/custom/conf/app.example.ini
index b86c8f1fae329..e569b1fa9fb32 100644
--- a/custom/conf/app.example.ini
+++ b/custom/conf/app.example.ini
@@ -1262,3 +1262,36 @@ STORAGE_TYPE = local
 ;MINIO_LOCATION = us-east-1
 ; Minio enabled ssl only available when STORAGE_TYPE is `minio`
 ;MINIO_USE_SSL = false
+
+; an auth plugin for docker images
+; example docker registry config
+; auth:
+;   token:
+;     autoredirect: false
+;     realm: http://127.0.0.1:3000/user/docker_token
+;     service: gitea-token-service
+;     issuer: gitea
+;     rootcertbundle: /path/to/server.pem
+;   ......
+; notifications:
+;   endpoints:
+;     - name: gitea
+;       url: http://127.0.0.1:3000/user/docker_event
+;       headers:
+;         X-Token: ['xxxxxxxxx']
+;   ..............
+
+[package.docker_registry_plugin]
+; Enables the docker registry plugin
+ENABLED = false
+; service name
+SERVICE_NAME = gitea-token-service
+; issuer name
+ISSUER_NAME = gitea
+; paths to the certificate and key files
+CERT_FILE = /path/to/server.pem
+KEY_FILE = /path/to/server.key
+; Token expiration time in seconds, must be at least 60
+EXPIRATION = 60
+; token used to verify notification requests
+NOTIFY_TOKEN = xxxxxxxxx
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 82e3b3b94c160..036f36352a6d0 100644
--- a/go.mod
+++ b/go.mod
@@ -29,6 +29,9 @@ require (
 	github.com/denisenkom/go-mssqldb v0.9.0
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible
 	github.com/dlclark/regexp2 v1.4.0 // indirect
+	github.com/docker/distribution v2.7.1+incompatible
+	github.com/docker/go-metrics v0.0.1 // indirect
+	github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7
 	github.com/dustin/go-humanize v1.0.0
 	github.com/editorconfig/editorconfig-core-go/v2 v2.4.1
 	github.com/emirpasic/gods v1.12.0
@@ -98,6 +101,8 @@ require (
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
 	github.com/oliamb/cutter v0.2.2
 	github.com/olivere/elastic/v7 v7.0.22
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/image-spec v1.0.1 // indirect
 	github.com/pelletier/go-toml v1.8.1
 	github.com/pierrec/lz4/v4 v4.1.3 // indirect
 	github.com/pkg/errors v0.9.1
diff --git a/go.sum b/go.sum
index 7d956962ddd98..62e404872f5d7 100644
--- a/go.sum
+++ b/go.sum
@@ -259,8 +259,14 @@ github.com/dlclark/regexp2 v1.1.6/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55k
 github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
 github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E=
 github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
 github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
 github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
@@ -745,6 +751,7 @@ github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQ
 github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -917,6 +924,10 @@ github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDs
 github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
 github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
 github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
 github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -960,6 +971,7 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
 github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
@@ -975,6 +987,7 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
 github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
@@ -984,6 +997,7 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@@ -1033,6 +1047,7 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
 github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -1371,6 +1386,7 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/models/error.go b/models/error.go
index 84b7ebbfa35b0..c1d927181ec0e 100644
--- a/models/error.go
+++ b/models/error.go
@@ -2139,3 +2139,22 @@ func IsErrOAuthApplicationNotFound(err error) bool {
 func (err ErrOAuthApplicationNotFound) Error() string {
 	return fmt.Sprintf("OAuth application not found [ID: %d]", err.ID)
 }
+
+// Package
+
+// ErrPackageNotExist represents an error where the requested package does not exist
+type ErrPackageNotExist struct {
+	RepoID int64
+	Name   string
+	Type   PackageType
+}
+
+// IsErrPackageNotExist checks if an error is an ErrPackageNotExist.
+func IsErrPackageNotExist(err error) bool {
+	_, ok := err.(ErrPackageNotExist)
+	return ok
+}
+
+func (err ErrPackageNotExist) Error() string {
+	return fmt.Sprintf("package does not exist [type: %s, repo_id: %d, name: %s]", err.Type.String(), err.RepoID, err.Name)
+}
diff --git a/models/migrations/migrations.go b/models/migrations/migrations.go
index 27c159ee4adef..708e5de9ef784 100644
--- a/models/migrations/migrations.go
+++ b/models/migrations/migrations.go
@@ -296,6 +296,8 @@ var migrations = []Migration{
 	NewMigration("Add time_id column to Comment", addTimeIDCommentColumn),
 	// v174 -> v175
 	NewMigration("create repo transfer table", addRepoTransfer),
+	// v175 -> v176
+	NewMigration("create package table", addPackageTable),
 }
 
 // GetCurrentDBVersion returns the current db version
diff --git a/models/migrations/v175.go b/models/migrations/v175.go
new file mode 100644
index 0000000000000..cf5ad53177df5
--- /dev/null
+++ b/models/migrations/v175.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package migrations
+
+import (
+	"xorm.io/xorm"
+)
+
+func addPackageTable(x *xorm.Engine) error {
+	type PackageType int64
+
+	type Package struct {
+		ID        int64 `xorm:"pk autoincr"`
+		Name      string
+		LowerName string `xorm:"INDEX"`
+
+		RepoID int64 `xorm:"INDEX"`
+
+		Type PackageType
+
+		CreatedUnix int64 `xorm:"INDEX created"`
+		UpdatedUnix int64 `xorm:"INDEX updated"`
+	}
+
+	return x.Sync(new(Package))
+}
diff --git a/models/models.go b/models/models.go
index ca4d1c4ad1502..3c40b3e8e172c 100644
--- a/models/models.go
+++ b/models/models.go
@@ -134,6 +134,7 @@ func init() {
 		new(ProjectIssue),
 		new(Session),
 		new(RepoTransfer),
+		new(Package),
 	)
 
 	gonicNames := []string{"SSL", "UID"}
diff --git a/models/package.go b/models/package.go
new file mode 100644
index 0000000000000..af720adc64927
--- /dev/null
+++ b/models/package.go
@@ -0,0 +1,102 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package models
+
+import (
+	"strings"
+
+	"code.gitea.io/gitea/modules/timeutil"
+)
+
+// PackageType is the type of a package
+type PackageType int64
+
+const (
+	// PackageTypeDockerImage a docker image (requires the docker registry plugin)
+	PackageTypeDockerImage PackageType = iota
+)
+
+func (t PackageType) String() string {
+	if t == PackageTypeDockerImage {
+		return "docker"
+	}
+	return ""
+}
+
+// Package represents a package attached to a repository
+type Package struct {
+	ID        int64 `xorm:"pk autoincr"`
+	Name      string
+	LowerName string `xorm:"INDEX"`
+
+	RepoID int64       `xorm:"INDEX"`
+	Repo   *Repository `xorm:"-"`
+
+	Type PackageType
+
+	CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+	UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+// AddPackageOptions holds the options for adding a package
+type AddPackageOptions struct {
+	Repo *Repository
+	Name string
+	Type PackageType
+}
+
+// AddPackage adds a new package
+func AddPackage(option AddPackageOptions) error {
+	pkg := &Package{
+		Name:   option.Name,
+		Type:   option.Type,
+		RepoID: option.Repo.ID,
+	}
+	pkg.LowerName = strings.ToLower(pkg.Name)
+	_, err := x.Insert(pkg)
+	return err
+}
+
+// GetPackage gets a package by repository, type and name
+func GetPackage(repoID int64, typ PackageType, name string) (*Package, error) {
+	return getPackage(x, repoID, typ, name)
+}
+
+func getPackage(e Engine, repoID int64, typ PackageType, name string) (*Package, error) {
+	lowName := strings.ToLower(name)
+	pkg := new(Package)
+	has, err := e.Where("repo_id = ? AND type = ? AND lower_name = ?",
+		repoID, typ, lowName).Get(pkg)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if !has {
+		return nil, ErrPackageNotExist{
+			RepoID: repoID,
+			Name:   name,
+			Type:   typ,
+		}
+	}
+
+	return pkg, nil
+}
+
+// UpdateCols updates the given columns of the package
+func (pkg *Package) UpdateCols(cols ...string) error {
+	_, err := x.ID(pkg.ID).Cols(cols...).Update(pkg)
+	return err
+}
+
+// LoadRepo loads the package's repository
+func (pkg *Package) LoadRepo() (err error) {
+	if pkg.Repo != nil {
+		return nil
+	}
+
+	pkg.Repo, err = GetRepositoryByID(pkg.RepoID)
+	return err
+}
diff --git a/models/repo_unit.go b/models/repo_unit.go
index 0feddfe2ea0cb..939738363fb99 100644
--- a/models/repo_unit.go
+++ b/models/repo_unit.go
@@ -149,7 +149,7 @@ func (r *RepoUnit) BeforeSet(colName string, val xorm.Cell) {
 	switch colName {
 	case "type":
 		switch UnitType(Cell2Int64(val)) {
-		case UnitTypeCode, UnitTypeReleases, UnitTypeWiki, UnitTypeProjects:
+		case UnitTypeCode, UnitTypeReleases, UnitTypeWiki, UnitTypeProjects, UnitTypePackages:
 			r.Config = new(UnitConfig)
 		case UnitTypeExternalWiki:
 			r.Config = new(ExternalWikiConfig)
diff --git a/models/unit.go b/models/unit.go
index 939deba574824..8a5ed55ad55d3 100644
--- a/models/unit.go
+++ b/models/unit.go
@@ -25,6 +25,7 @@ const (
 	UnitTypeExternalWiki    // 6 ExternalWiki
 	UnitTypeExternalTracker // 7 ExternalTracker
 	UnitTypeProjects        // 8 Kanban board
+	UnitTypePackages        // 9 packages
 )
 
 // Value returns integer value for unit type
@@ -50,6 +51,8 @@ func (u UnitType) String() string {
 		return "UnitTypeExternalTracker"
 	case UnitTypeProjects:
 		return "UnitTypeProjects"
+	case UnitTypePackages:
+		return "UnitTypePackages"
 	}
 	return fmt.Sprintf("Unknown UnitType %d", u)
 }
@@ -72,6 +75,7 @@ var (
 		UnitTypeExternalWiki,
 		UnitTypeExternalTracker,
 		UnitTypeProjects,
+		UnitTypePackages,
 	}
 
 	// DefaultRepoUnits contains the default unit types
@@ -88,6 +92,7 @@ var (
 	NotAllowedDefaultRepoUnits = []UnitType{
 		UnitTypeExternalWiki,
 		UnitTypeExternalTracker,
+		UnitTypePackages,
 	}
 
 	// MustRepoUnits contains the units could not be disabled currently
@@ -255,6 +260,14 @@ var (
 		5,
 	}
 
+	UnitPackages = Unit{
+		UnitTypePackages,
+		"repo.packages",
+		"/packages",
+		"repo.packages.desc",
+		6,
+	}
+
 	// Units contains all the units
 	Units = map[UnitType]Unit{
 		UnitTypeCode:            UnitCode,
@@ -265,6 +278,7 @@ var (
 		UnitTypeWiki:            UnitWiki,
 		UnitTypeExternalWiki:    UnitExternalWiki,
 		UnitTypeProjects:        UnitProjects,
+		UnitTypePackages:        UnitPackages,
 	}
 )
diff --git a/modules/context/repo.go b/modules/context/repo.go
index ba3cfe9bf256a..e668845014335 100644
--- a/modules/context/repo.go
+++ b/modules/context/repo.go
@@ -878,6 +878,7 @@ func UnitTypes() func(ctx *Context) {
 		ctx.Data["UnitTypeExternalWiki"] = models.UnitTypeExternalWiki
 		ctx.Data["UnitTypeExternalTracker"] = models.UnitTypeExternalTracker
 		ctx.Data["UnitTypeProjects"] = models.UnitTypeProjects
+		ctx.Data["UnitTypePackages"] = models.UnitTypePackages
 	}
 }
diff --git a/modules/convert/package.go b/modules/convert/package.go
new file mode 100644
index 0000000000000..0ae5fab059c0c
--- /dev/null
+++ b/modules/convert/package.go
@@ -0,0 +1,24 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package convert
+
+import (
+	"code.gitea.io/gitea/models"
+	api "code.gitea.io/gitea/modules/structs"
+)
+
+// ToPackage converts a models.Package to an api.Package
+func ToPackage(pkg *models.Package) *api.Package {
+	rs := &api.Package{
+		Name:    pkg.Name,
+		Type:    pkg.Type.String(),
+		Owner:   ToUser(pkg.Repo.Owner, false, false),
+		Repo:    ToRepo(pkg.Repo, models.AccessModeNone),
+		Created: pkg.CreatedUnix.AsTimePtr(),
+		Updated: pkg.UpdatedUnix.AsTimePtr(),
+	}
+	rs.Private = rs.Repo.Private
+	return rs
+}
diff --git a/modules/forms/repo_form.go b/modules/forms/repo_form.go
index ab88aef571f0b..5c1cc991b997d 100644
--- a/modules/forms/repo_form.go
+++ b/modules/forms/repo_form.go
@@ -150,6 +150,7 @@ type RepoSettingForm struct {
 	TrackerURLFormat      string
 	TrackerIssueStyle     string
 	EnableProjects        bool
+	EnablePackages        bool
 	EnablePulls           bool
 	PullsIgnoreWhitespace bool
 	PullsAllowMerge       bool
diff --git a/modules/setting/package.go b/modules/setting/package.go
new file mode 100644
index 0000000000000..a8c50506a0b3e
--- /dev/null
+++ b/modules/setting/package.go
@@ -0,0 +1,84 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package setting
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+
+	"code.gitea.io/gitea/modules/log"
+
+	"github.com/docker/libtrust"
+)
+
+// dockerPluginConfig holds the settings of the docker registry token plugin
+type dockerPluginConfig struct {
+	ServiceName string
+	IssuerName  string
+	Expiration  int64
+	NotifyToken string
+
+	PublicKey  libtrust.PublicKey
+	PrivateKey libtrust.PrivateKey
+}
+
+var (
+	// Docker holds the docker registry plugin config; it is nil when the plugin is disabled
+	Docker *dockerPluginConfig
+)
+
+func newPackage() {
+	cfg := Cfg.Section("package.docker_registry_plugin")
+	if cfg == nil || !cfg.Key("ENABLED").MustBool(false) {
+		Repository.DisabledRepoUnits = append(Repository.DisabledRepoUnits, "repo.packages")
+		return
+	}
+
+	Docker = new(dockerPluginConfig)
+	Docker.Expiration = cfg.Key("EXPIRATION").RangeInt64(60, 60, 3600)
+
+	certFile := cfg.Key("CERT_FILE").String()
+	if len(certFile) == 0 {
+		log.Fatal("newPackage.docker_registry_plugin: `CERT_FILE` is required")
+	}
+	keyFile := cfg.Key("KEY_FILE").String()
+	if len(keyFile) == 0 {
+		log.Fatal("newPackage.docker_registry_plugin: `KEY_FILE` is required")
+	}
+	var err error
+	Docker.PublicKey, Docker.PrivateKey, err = loadCertAndKey(certFile, keyFile)
+	if err != nil {
+		log.Fatal("docker_registry_plugin: loadCertAndKey: %v", err)
+	}
+
+	Docker.IssuerName = cfg.Key("ISSUER_NAME").MustString("gitea")
+	Docker.ServiceName = cfg.Key("SERVICE_NAME").MustString("gitea-token-service")
+	Docker.NotifyToken = cfg.Key("NOTIFY_TOKEN").String()
+	if len(Docker.NotifyToken) == 0 {
+		log.Fatal("docker_registry_plugin: `NOTIFY_TOKEN` is required")
+	}
+}
+
+// HasDockerPlugin reports whether the docker registry plugin is enabled
+func HasDockerPlugin() bool {
+	return Docker != nil
+}
+
+func loadCertAndKey(certFile, keyFile string) (pk libtrust.PublicKey, prk libtrust.PrivateKey, err error) {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return
+	}
+	x509Cert, err := x509.ParseCertificate(cert.Certificate[0])
+	if err != nil {
+		return
+	}
+	pk, err = libtrust.FromCryptoPublicKey(x509Cert.PublicKey)
+	if err != nil {
+		return
+	}
+	prk, err = libtrust.FromCryptoPrivateKey(cert.PrivateKey)
+	return
+}
diff --git a/modules/setting/setting.go b/modules/setting/setting.go
index 4976c0007c648..d7f5f0f2a841a 100644
--- a/modules/setting/setting.go
+++ b/modules/setting/setting.go
@@ -1155,4 +1155,5 @@ func NewServices() {
 	newTaskService()
 	NewQueueService()
 	newProject()
+	newPackage()
 }
diff --git a/modules/structs/package.go b/modules/structs/package.go
new file mode 100644
index 0000000000000..bb88a7b2bbb5e
--- /dev/null
+++ b/modules/structs/package.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package structs
+
+import "time"
+
+// Package represents a package
+type Package struct {
+	Name    string      `json:"name"`
+	Type    string      `json:"package_type"`
+	Owner   *User       `json:"owner"`
+	Repo    *Repository `json:"repository"`
+	Private bool        `json:"private"`
+
+	// swagger:strfmt date-time
+	Created *time.Time `json:"created_at"`
+	// swagger:strfmt date-time
+	Updated *time.Time `json:"updated_at"`
+}
diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini
index 08b202d192f84..6373d7bd439ff 100644
--- a/options/locale/locale_en-US.ini
+++ b/options/locale/locale_en-US.ini
@@ -1957,6 +1957,9 @@ topic.done = Done
 topic.count_prompt = You can not select more than 25 topics
 topic.format_prompt = Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
+packages = Packages
+packages.desc = Permission unit for third-party packages.
+
 [org]
 org_name_holder = Organization Name
 org_full_name_holder = Organization Full Name
diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go
index a8499e0ee8f6a..701812265ccb3 100644
--- a/routers/api/v1/api.go
+++ b/routers/api/v1/api.go
@@ -951,6 +951,9 @@ func Routes() *web.Route {
 			}, reqAnyRepoReader())
 			m.Get("/issue_templates", context.ReferencesGitRepo(false), repo.GetIssueTemplates)
 			m.Get("/languages", reqRepoReader(models.UnitTypeCode), repo.GetLanguages)
+			m.Group("/packages", func() {
+				m.Get("/{type}/{name}", repo.GetPackage)
+			}, reqRepoReader(models.UnitTypePackages))
 		}, repoAssignment())
 	})
diff --git a/routers/api/v1/repo/package.go b/routers/api/v1/repo/package.go
new file mode 100644
index 0000000000000..019c27a55579e
--- /dev/null
+++ b/routers/api/v1/repo/package.go
@@ -0,0 +1,74 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package repo
+
+import (
+	"net/http"
+
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/context"
+	"code.gitea.io/gitea/modules/convert"
+)
+
+// GetPackage gets a single package of a repository
+func GetPackage(ctx *context.APIContext) {
+	// swagger:operation GET /repos/{owner}/{repo}/packages/{type}/{name} repository repoGetPackage
+	// ---
+	// summary: Get a package
+	// produces:
+	// - application/json
+	// parameters:
+	// - name: owner
+	//   in: path
+	//   description: owner of the repo
+	//   type: string
+	//   required: true
+	// - name: repo
+	//   in: path
+	//   description: name of the repo
+	//   type: string
+	//   required: true
+	// - name: type
+	//   in: path
+	//   description: type of package
+	//   type: string
+	//   required: true
+	// - name: name
+	//   in: path
+	//   description: name of package
+	//   type: string
+	//   required: true
+	// responses:
+	//   "200":
+	//     "$ref": "#/responses/Package"
+	//   "404":
+	//     "$ref": "#/responses/notFound"
+	typ := ctx.Params("type")
+	name := ctx.Params("name")
+	if typ != models.PackageTypeDockerImage.String() {
+		ctx.NotFound()
+		return
+	}
+	pkg, err := models.GetPackage(ctx.Repo.Repository.ID, models.PackageTypeDockerImage, name)
+	if err != nil {
+		if models.IsErrPackageNotExist(err) {
+			ctx.NotFound()
+		} else {
+			ctx.InternalServerError(err)
+		}
+		return
+	}
+
+	if err := pkg.LoadRepo(); err != nil {
+		ctx.InternalServerError(err)
+		return
+	}
+
+	if err := pkg.Repo.GetOwner(); err != nil {
+		ctx.InternalServerError(err)
+		return
+	}
+
+	ctx.JSON(http.StatusOK, convert.ToPackage(pkg))
+}
diff --git a/routers/api/v1/swagger/repo.go b/routers/api/v1/swagger/repo.go
index d539bcb9feabb..6576866a31c8b 100644
--- a/routers/api/v1/swagger/repo.go
+++ b/routers/api/v1/swagger/repo.go
@@ -316,3 +316,17 @@ type swaggerCombinedStatus struct {
 	// in: body
 	Body api.CombinedStatus `json:"body"`
 }
+
+// PackageList
+// swagger:response PackageList
+type swaggerResponsePackageList struct {
+	// in:body
+	Body []api.Package `json:"body"`
+}
+
+// Package
+// swagger:response Package
+type swaggerResponsePackage struct {
+	// in:body
+	Body api.Package `json:"body"`
+}
diff --git a/routers/repo/package.go b/routers/repo/package.go
new file mode 100644
index 0000000000000..a24856f639067
--- /dev/null
+++ b/routers/repo/package.go
@@ -0,0 +1,421 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package repo
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"regexp"
+	"sort"
+	"strings"
+	"time"
+
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/context"
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/setting"
+	"code.gitea.io/gitea/modules/timeutil"
+
+	"github.com/docker/distribution/notifications"
+	"github.com/docker/distribution/registry/auth/token"
+)
+
+// DockerPluginLogin token service for docker registry
+func DockerPluginLogin(ctx *context.Context) {
+	if !setting.HasDockerPlugin() {
+		ctx.Status(http.StatusNotFound)
+		return
+	}
+
+	// handle params
+	service := ctx.Query("service")
+	if service != setting.Docker.ServiceName {
+		ctx.Status(http.StatusBadRequest)
+		return
+	}
+
+	scope := ctx.Query("scope")
+	if len(scope) == 0 {
+		if ctx.User == nil {
+			ctx.Status(http.StatusUnauthorized)
+			return
+		}
+		// Authentication-only request ("docker login"), pass through.
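+		// An empty authzResult list still yields a signed token, just one
+		// that carries no access claims (see generateToken below).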
+		handleResult(ctx, []authzResult{})
+		return
+	}
+
+	scopes, err := handleScopes(scope)
+	if err != nil {
+		ctx.Status(http.StatusBadRequest)
+		return
+	}
+
+	results := handlePermission(ctx, scopes)
+	if ctx.Written() {
+		return
+	}
+
+	handleResult(ctx, results)
+}
+
+func handlePermission(ctx *context.Context, scopes []authScope) []authzResult {
+	rs := make([]authzResult, 0, len(scopes))
+	repos := make(map[string]*models.Repository)
+	repoPerms := make(map[int64]models.Permission)
+
+	for _, scope := range scopes {
+		hasPush := false
+		hasPull := false
+		needCreate := false
+		for _, act := range scope.Actions {
+			if act == "push" {
+				hasPush = true
+				continue
+			}
+			if act == "pull" {
+				hasPull = true
+			}
+		}
+		if hasPush && ctx.User == nil {
+			continue
+		}
+		// scope names follow the {owner}/{repo}[/{image}] rule; the image
+		// name falls back to the repository name when it is not given
+		splits := strings.SplitN(scope.Name, "/", 3)
+		var (
+			owner    string
+			repoName string
+			image    string
+		)
+		if len(splits) < 2 {
+			continue
+		}
+		owner = splits[0]
+		repoName = splits[1]
+		if len(splits) == 3 {
+			image = splits[2]
+		} else {
+			image = repoName
+		}
+
+		repo, has := repos[owner+"/"+repoName]
+		var err error
+		if !has {
+			repo, err = models.GetRepositoryByOwnerAndName(owner, repoName)
+			if err != nil {
+				if models.IsErrRepoNotExist(err) {
+					continue
+				}
+				ctx.Error(http.StatusInternalServerError, "handlePermission")
+				log.Error("docker: GetRepositoryByOwnerAndName: %v", err)
+				return nil
+			}
+			repos[owner+"/"+repoName] = repo
+		}
+
+		_, err = models.GetPackage(repo.ID, models.PackageTypeDockerImage, image)
+		if err != nil {
+			if !models.IsErrPackageNotExist(err) {
+				ctx.Error(http.StatusInternalServerError, "handlePermission")
+				log.Error("docker: GetPackage: %v", err)
+				return nil
+			}
+			needCreate = true
+		}
+
+		perm, has := repoPerms[repo.ID]
+		if !has {
+			perm, err = models.GetUserRepoPermission(repo, ctx.User)
+			if err != nil {
+				ctx.Error(http.StatusInternalServerError, "handlePermission")
+				log.Error("docker: GetUserRepoPermission: %v", err)
+				return nil
+			}
+			repoPerms[repo.ID] = perm
+		}
+
+		events := make([]string, 0, 2)
+		// pushing to an existing package requires write access,
+		// creating a new package requires admin access
+		accessMode := models.AccessModeRead
+		if hasPush {
+			accessMode = models.AccessModeWrite
+			if needCreate {
+				accessMode = models.AccessModeAdmin
+			}
+		}
+		if perm.CanAccess(accessMode, models.UnitTypePackages) {
+			if hasPush {
+				events = append(events, "push")
+			}
+			if hasPull {
+				events = append(events, "pull")
+			}
+		}
+
+		if len(events) == 0 {
+			continue
+		}
+
+		rs = append(rs, authzResult{
+			scope:             scope,
+			authorizedActions: events,
+		})
+	}
+
+	return rs
+}
+
+var resourceTypeRegex = regexp.MustCompile(`([a-z0-9]+)(\([a-z0-9]+\))?`)
+
+func handleResult(ctx *context.Context, ares []authzResult) {
+	account := ""
+	if ctx.User != nil {
+		account = ctx.User.Name
+	}
+	token, err := generateToken(account, ares)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "generateToken")
+		log.Error("Failed to generate token: %v", err)
+		return
+	}
+
+	result, _ := json.Marshal(&map[string]string{"access_token": token, "token": token})
+	ctx.Header().Set("Content-Type", "application/json")
+	_, err = ctx.Write(result)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "generateToken")
+		log.Error("Failed to write token response: %v", err)
+	}
+}
+
+type authScope struct {
+	Type    string
+	Class   string
+	Name    string
+	Actions []string
+}
+
+func handleScopes(scope string) ([]authScope, error) {
+	scopes := strings.Split(scope, " ")
+	rs := make([]authScope, 0, len(scopes))
+	for _, scopeStr := range scopes {
+		parts := strings.Split(scopeStr, ":")
+		var scope authScope
+
+		scopeType, scopeClass, err := parseResourceType(parts[0])
+		if err != nil {
+			return nil, err
+		}
+
+		switch len(parts) {
+		case 3:
+			scope = authScope{
+				Type:    scopeType,
+				Class:   scopeClass,
+				Name:    parts[1],
+				Actions: strings.Split(parts[2], ","),
+			}
+		case 4:
+			scope = authScope{
+				Type:    scopeType,
+				Class:   scopeClass,
+				Name:    parts[1] + ":" + parts[2],
+				Actions: strings.Split(parts[3], ","),
+			}
+		default:
+			return nil, fmt.Errorf("invalid scope: %q", scopeStr)
+		}
+		sort.Strings(scope.Actions)
+		rs = append(rs, scope)
+	}
+
+	return rs, nil
+}
+
+func parseResourceType(scope string) (string, string, error) {
+	parts := resourceTypeRegex.FindStringSubmatch(scope)
+	if parts == nil {
+		return "", "", fmt.Errorf("malformed scope request")
+	}
+
+	switch len(parts) {
+	case 3:
+		return parts[1], "", nil
+	case 4:
+		return parts[1], parts[3], nil
+	default:
+		return "", "", fmt.Errorf("malformed scope request")
+	}
+}
+
+type authzResult struct {
+	scope             authScope
+	authorizedActions []string
+}
+
+func generateToken(account string, ares []authzResult) (string, error) {
+	now := time.Now().Unix()
+
+	// Sign something dummy to find out which algorithm is used.
+	_, sigAlg, err := setting.Docker.PrivateKey.Sign(strings.NewReader("dummy"), 0)
+	if err != nil {
+		return "", fmt.Errorf("failed to sign: %s", err)
+	}
+	header := token.Header{
+		Type:       "JWT",
+		SigningAlg: sigAlg,
+		KeyID:      setting.Docker.PublicKey.KeyID(),
+	}
+	headerJSON, err := json.Marshal(header)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal header: %s", err)
+	}
+
+	claims := token.ClaimSet{
+		Issuer:     setting.Docker.IssuerName,
+		Subject:    account,
+		Audience:   setting.Docker.ServiceName,
+		NotBefore:  now - 10,
+		IssuedAt:   now,
+		Expiration: now + setting.Docker.Expiration,
+		JWTID:      fmt.Sprintf("%d", rand.Int63()),
+		Access:     []*token.ResourceActions{},
+	}
+	for _, a := range ares {
+		ra := &token.ResourceActions{
+			Type:    a.scope.Type,
+			Name:    a.scope.Name,
+			Actions: a.authorizedActions,
+		}
+		if ra.Actions == nil {
+			ra.Actions = []string{}
+		}
+		sort.Strings(ra.Actions)
+		claims.Access = append(claims.Access, ra)
+	}
+	claimsJSON, err := json.Marshal(claims)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal claims: %s", err)
+	}
+
+	payload := fmt.Sprintf("%s%s%s", joseBase64UrlEncode(headerJSON), token.TokenSeparator, joseBase64UrlEncode(claimsJSON))
+
+	sig, sigAlg2, err := setting.Docker.PrivateKey.Sign(strings.NewReader(payload), 0)
+	if err != nil {
+		return "", fmt.Errorf("failed to sign token: %s", err)
+	}
+	if sigAlg2 != sigAlg {
+		return "", fmt.Errorf("unexpected signing algorithm: got %s, want %s", sigAlg2, sigAlg)
+	}
+	return fmt.Sprintf("%s%s%s", payload, token.TokenSeparator, joseBase64UrlEncode(sig)), nil
+}
+
+// Copy-pasted from libtrust where it is private.
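+// JOSE base64url encoding uses the URL-safe alphabet with the trailing
+// '=' padding stripped, which is what the TrimRight below does.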
+func joseBase64UrlEncode(b []byte) string {
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DockerPluginEvent event service for docker registry
+func DockerPluginEvent(ctx *context.Context) {
+	if !setting.HasDockerPlugin() {
+		ctx.Status(http.StatusNotFound)
+		return
+	}
+
+	token := ctx.Req.Header.Get("X-Token")
+	if token != setting.Docker.NotifyToken {
+		ctx.Error(http.StatusUnauthorized)
+		return
+	}
+
+	body, err := ioutil.ReadAll(ctx.Req.Body)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError)
+		log.Error("Failed to read event: %v", err)
+		return
+	}
+	data := new(notifications.Envelope)
+
+	err = json.Unmarshal(body, data)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError)
+		log.Error("Failed to unmarshal event: %v", err)
+		return
+	}
+
+	// handle events
+	repos := make(map[string]*models.Repository)
+	pkgs := make(map[string]*models.Package)
+	for _, event := range data.Events {
+		if event.Action == notifications.EventActionPush {
+			var (
+				owner    string
+				repoName string
+				image    string
+			)
+			splits := strings.SplitN(event.Target.Repository, "/", 3)
+			if len(splits) < 2 {
+				continue
+			}
+			owner = splits[0]
+			repoName = splits[1]
+			if len(splits) == 3 {
+				image = splits[2]
+			} else {
+				image = repoName
+			}
+
+			repo, has := repos[owner+"/"+repoName]
+			if !has {
+				repo, err = models.GetRepositoryByOwnerAndName(owner, repoName)
+				if err != nil {
+					if models.IsErrRepoNotExist(err) {
+						continue
+					}
+					ctx.Error(http.StatusInternalServerError, "DockerPluginEvent")
+					log.Error("docker: GetRepositoryByOwnerAndName: %v", err)
+					return
+				}
+				repos[owner+"/"+repoName] = repo
+			}
+
+			pkg, has := pkgs[event.Target.Repository]
+			if !has {
+				pkg, err = models.GetPackage(repo.ID, models.PackageTypeDockerImage, image)
+				if err != nil {
+					if models.IsErrPackageNotExist(err) {
+						// create a new pkg
+						err = models.AddPackage(models.AddPackageOptions{
+							Repo: repo,
+							Name: image,
+							Type: models.PackageTypeDockerImage,
+						})
+						if err != nil {
+							ctx.Error(http.StatusInternalServerError, "DockerPluginEvent")
+							log.Error("docker: AddPackage: %v", err)
+							return
+						}
+						continue
+					}
+					ctx.Error(http.StatusInternalServerError, "DockerPluginEvent")
+					log.Error("docker: GetPackage: %v", err)
+					return
+				}
+				if pkg != nil {
+					pkgs[event.Target.Repository] = pkg
+				}
+			}
+
+			// update the package's update time
+			pkg.UpdatedUnix = timeutil.TimeStamp(event.Timestamp.Unix())
+			err = pkg.UpdateCols("updated_unix")
+			if err != nil {
+				ctx.Error(http.StatusInternalServerError, "DockerPluginEvent")
+				log.Error("docker: UpdateCols(updated_unix): %v", err)
+				return
+			}
+		}
+	}
+
+	ctx.JSON(http.StatusOK, map[string]string{
+		"result": "ok",
+	})
+}
diff --git a/routers/repo/setting.go b/routers/repo/setting.go
index 692d65b44ce16..49bac77d7b22f 100644
--- a/routers/repo/setting.go
+++ b/routers/repo/setting.go
@@ -311,6 +311,15 @@ func SettingsPost(ctx *context.Context) {
 		deleteUnitTypes = append(deleteUnitTypes, models.UnitTypeProjects)
 	}
 
+	if form.EnablePackages && !models.UnitTypePackages.UnitGlobalDisabled() {
+		units = append(units, models.RepoUnit{
+			RepoID: repo.ID,
+			Type:   models.UnitTypePackages,
+		})
+	} else if !models.UnitTypePackages.UnitGlobalDisabled() {
+		deleteUnitTypes = append(deleteUnitTypes, models.UnitTypePackages)
+	}
+
 	if form.EnablePulls && !models.UnitTypePullRequests.UnitGlobalDisabled() {
 		units = append(units, models.RepoUnit{
 			RepoID: repo.ID,
diff --git a/routers/routes/web.go b/routers/routes/web.go
index 22774b2cdccca..05c492ac895c4 100644
--- a/routers/routes/web.go
+++ b/routers/routes/web.go
@@ -341,6 +341,8 @@ func RegisterRoutes(m *web.Route) {
 	m.Get("/milestones", reqSignIn, reqMilestonesDashboardPageEnabled, user.Milestones)
 
 	// ***** START: User *****
+	m.Get("/user/docker_token", repo.DockerPluginLogin)
+	m.Any("/user/docker_event", repo.DockerPluginEvent)
 	m.Group("/user", func() {
 		m.Get("/login", user.SignIn)
 		m.Post("/login", bindIgnErr(auth.SignInForm{}), user.SignInPost)
diff --git a/templates/repo/settings/options.tmpl b/templates/repo/settings/options.tmpl
index 9d87101671ffb..286febeb690f2 100644
--- a/templates/repo/settings/options.tmpl
+++ b/templates/repo/settings/options.tmpl
@@ -284,6 +284,20 @@
+
+ {{$isPackagesEnabled := .Repository.UnitEnabled $.UnitTypePackages}} +
+ + {{if .UnitTypePackages.UnitGlobalDisabled}} +
+ {{else}} +
+ {{end}} + + +
+
+ {{if not .IsMirror}}
 {{$pullRequestEnabled := .Repository.UnitEnabled $.UnitTypePullRequests}}
diff --git a/templates/swagger/v1_json.tmpl b/templates/swagger/v1_json.tmpl
index 930af907ea8bd..ae80ff6eb4d63 100644
--- a/templates/swagger/v1_json.tmpl
+++ b/templates/swagger/v1_json.tmpl
@@ -6902,6 +6902,56 @@
         }
       }
     },
+    "/repos/{owner}/{repo}/packages/{type}/{name}": {
+      "get": {
+        "produces": [
+          "application/json"
+        ],
+        "tags": [
+          "repository"
+        ],
+        "summary": "Get a package",
+        "operationId": "repoGetPackage",
+        "parameters": [
+          {
+            "type": "string",
+            "description": "owner of the repo",
+            "name": "owner",
+            "in": "path",
+            "required": true
+          },
+          {
+            "type": "string",
+            "description": "name of the repo",
+            "name": "repo",
+            "in": "path",
+            "required": true
+          },
+          {
+            "type": "string",
+            "description": "type of package",
+            "name": "type",
+            "in": "path",
+            "required": true
+          },
+          {
+            "type": "string",
+            "description": "name of package",
+            "name": "name",
+            "in": "path",
+            "required": true
+          }
+        ],
+        "responses": {
+          "200": {
+            "$ref": "#/responses/Package"
+          },
+          "404": {
+            "$ref": "#/responses/notFound"
+          }
+        }
+      }
+    },
     "/repos/{owner}/{repo}/pulls": {
       "get": {
         "produces": [
@@ -15032,6 +15082,41 @@
     },
     "x-go-package": "code.gitea.io/gitea/modules/structs"
     },
+    "Package": {
+      "description": "Package represents a package",
+      "type": "object",
+      "properties": {
+        "created_at": {
+          "type": "string",
+          "format": "date-time",
+          "x-go-name": "Created"
+        },
+        "name": {
+          "type": "string",
+          "x-go-name": "Name"
+        },
+        "owner": {
+          "$ref": "#/definitions/User"
+        },
+        "package_type": {
+          "type": "string",
+          "x-go-name": "Type"
+        },
+        "private": {
+          "type": "boolean",
+          "x-go-name": "Private"
+        },
+        "repository": {
+          "$ref": "#/definitions/Repository"
+        },
+        "updated_at": {
+          "type": "string",
+          "format": "date-time",
+          "x-go-name": "Updated"
+        }
+      },
+      "x-go-package": "code.gitea.io/gitea/modules/structs"
+    },
     "PayloadCommit": {
       "description": "PayloadCommit represents a commit",
       "type": "object",
@@ -16725,6 +16810,21 @@
       }
     }
   },
+    "Package": {
+      "description": "Package",
+      "schema": {
+        "$ref": "#/definitions/Package"
+      }
+    },
+    "PackageList": {
+      "description": "PackageList",
+      "schema": {
+        "type": "array",
+        "items": {
+          "$ref": "#/definitions/Package"
+        }
+      }
+    },
     "PublicKey": {
       "description": "PublicKey",
       "schema": {
diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore
new file mode 100644
index 0000000000000..4cf7888e927e9
--- /dev/null
+++ b/vendor/github.com/docker/distribution/.gitignore
@@ -0,0 +1,38 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# never checkin from the bin file (for now)
+bin/*
+
+# Test key files
+*.pem
+
+# Cover profiles
+*.out
+
+# Editor/IDE specific files.
+*.sublime-project +*.sublime-workspace +.idea/* diff --git a/vendor/github.com/docker/distribution/.gometalinter.json b/vendor/github.com/docker/distribution/.gometalinter.json new file mode 100644 index 0000000000000..9df5b14bcb2ef --- /dev/null +++ b/vendor/github.com/docker/distribution/.gometalinter.json @@ -0,0 +1,16 @@ +{ + "Vendor": true, + "Deadline": "2m", + "Sort": ["linter", "severity", "path", "line"], + "EnableGC": true, + "Enable": [ + "structcheck", + "staticcheck", + "unconvert", + + "gofmt", + "goimports", + "golint", + "vet" + ] +} diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap new file mode 100644 index 0000000000000..0f48321d49713 --- /dev/null +++ b/vendor/github.com/docker/distribution/.mailmap @@ -0,0 +1,32 @@ +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland +Brian Bland Brian Bland +Josh Hawn Josh Hawn +Richard Scothern Richard +Richard Scothern Richard Scothern +Andrew Meredith Andrew Meredith +harche harche +Jessie Frazelle +Sharif Nassar Sharif Nassar +Sven Dowideit Sven Dowideit +Vincent Giersch Vincent Giersch +davidli davidli +Omer Cohen Omer Cohen +Eric Yang Eric Yang +Nikita Tarasov Nikita +Yu Wang yuwaMSFT2 +Yu Wang Yu Wang (UC) +Olivier Gambier dmp +Olivier Gambier Olivier +Olivier Gambier Olivier +Elsan Li 李楠 elsanli(李楠) +Rui Cao ruicao +Gwendolynne Barr gbarr01 +Haibing Zhou 周海兵 zhouhaibing089 +Feng Honglin tifayuki +Helen Xie Helen-xie +Mike Brown Mike Brown +Manish Tomar Manish Tomar +Sakeven Jiang sakeven diff --git a/vendor/github.com/docker/distribution/.travis.yml b/vendor/github.com/docker/distribution/.travis.yml new file mode 100644 index 0000000000000..44ced60451db2 --- /dev/null +++ b/vendor/github.com/docker/distribution/.travis.yml @@ -0,0 +1,51 @@ +dist: trusty +sudo: required +# setup travis so that we can run containers for integration tests +services: + - docker + +language: go + +go: + - "1.11.x" + +go_import_path: github.com/docker/distribution + +addons: + apt: + packages: + - python-minimal + + +env: + - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1 + +before_install: + - uname -r + - sudo apt-get -q update + +install: + - go get -u github.com/vbatts/git-validation + # TODO: Add enforcement of license + # - go get -u github.com/kunalkushwaha/ltag + - cd $TRAVIS_BUILD_DIR + +script: + - export GOOS=$TRAVIS_GOOS + - export CGO_ENABLED=$TRAVIS_CGO_ENABLED + - DCO_VERBOSITY=-q script/validate/dco + - GOOS=linux script/setup/install-dev-tools + - script/validate/vendor + - go build -i . + - make check + - make build + - make binaries + # Currently takes too long + #- if [ "$GOOS" = "linux" ]; then make test-race ; fi + - if [ "$GOOS" = "linux" ]; then make coverage ; fi + +after_success: + - bash <(curl -s https://codecov.io/bash) -F linux + +before_deploy: + # Run tests with storage driver configurations diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md new file mode 100644 index 0000000000000..2981d016b0d55 --- /dev/null +++ b/vendor/github.com/docker/distribution/BUILDING.md @@ -0,0 +1,117 @@ + +# Building the registry source + +## Use-case + +This is useful if you intend to actively work on the registry. + +### Alternatives + +Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). 
+ +People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. + +OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). + +### Gotchas + +You are expected to know your way around with go & git. + +If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. + +## Build the development environment + +The first prerequisite of properly building distribution targets is to have a Go +development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) +for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the +environment. + +If a Go development environment is setup, one can use `go get` to install the +`registry` command from the current latest: + + go get github.com/docker/distribution/cmd/registry + +The above will install the source repository into the `GOPATH`. + +Now create the directory for the registry data (this might require you to set permissions properly) + + mkdir -p /var/lib/registry + +... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. + +The `registry` +binary can then be run with the following: + + $ $GOPATH/bin/registry --version + $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown + +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. + +The registry can be run with the default config using the following +incantation: + + $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml + INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] debug server listening localhost:5001 + +If it is working, one should see the above log messages. + +### Repeatable Builds + +For the full development experience, one should `cd` into +`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +A `Makefile` has been provided as a convenience to support repeatable builds. +Please install the following into `GOPATH` for it to work: + + go get github.com/golang/lint/golint + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + + $ make + + clean + + fmt + + vet + + lint + + build + github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar + github.com/sirupsen/logrus + github.com/docker/libtrust + ... + github.com/yvasiyarov/gorelic + github.com/docker/distribution/registry/handlers + github.com/docker/distribution/cmd/registry + + test + ... 
+ ok github.com/docker/distribution/digest 7.875s + ok github.com/docker/distribution/manifest 0.028s + ok github.com/docker/distribution/notifications 17.322s + ? github.com/docker/distribution/registry [no test files] + ok github.com/docker/distribution/registry/api/v2 0.101s + ? github.com/docker/distribution/registry/auth [no test files] + ok github.com/docker/distribution/registry/auth/silly 0.011s + ... + + /Users/sday/go/src/github.com/docker/distribution/bin/registry + + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + + binaries + +The above provides a repeatable build using the contents of the vendor +directory. This includes formatting, vetting, linting, building, +testing and generating tagged binaries. We can verify this worked by running +the registry binary generated in the "./bin" directory: + + $ ./bin/registry --version + ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md new file mode 100644 index 0000000000000..4c067d9e7ece3 --- /dev/null +++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -0,0 +1,148 @@ +# Contributing to the registry + +## Before reporting an issue... + +### If your problem is with... + + - automated builds + - your account on the [Docker Hub](https://hub.docker.com/) + - any other [Docker Hub](https://hub.docker.com/) issue + +Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) + +### If you... + + - need help setting up your registry + - can't figure out something + - are not sure what's going on or what your problem is + +Then please do not open an issue here yet - you should first try one of the following support forums: + + - irc: #docker-distribution on freenode + - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution + +### Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +## Reporting an issue properly + +By following these simple rules you will get better and faster feedback on your issue. + + - search the bugtracker for an already reported issue + +### If you found an issue that describes your problem: + + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. + +### If you have not found an existing issue that describes your problem: + + 1. 
create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. copy the output of: + - `docker version` + - `docker info` + - `docker exec registry --version` + 3. copy the command line you used to launch your Registry + 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry + +## Contributing a patch for a known bug, or a small correction + +You should follow the basic GitHub workflow: + + 1. fork + 2. commit a change + 3. make sure the tests pass + 4. PR + +Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: + + - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` + - sign your commits using `-s`: `git commit -s -m "My commit"` + +Some simple rules to ensure quick merge: + + - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) + - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once + - if you need to amend your PR following comments, please squash instead of adding more commits + +## Contributing new features + +You are heavily encouraged to first discuss what you want to do. You can do so on the IRC channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. + +If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. + +Then you should submit your implementation, clearly linking to the issue (and possible proposal). + +Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. + +It's mandatory to: + + - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) + - address maintainers' comments and modify your submission accordingly + - write tests for any new code + +Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. + +Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines.
All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](http://golang.org/doc/effective_go.html). The +[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile new file mode 100644 index 0000000000000..9537817ca3a40 --- /dev/null +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -0,0 +1,23 @@ +FROM golang:1.11-alpine AS build + +ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution +ENV BUILDTAGS include_oss include_gcs + +ARG GOOS=linux +ARG GOARCH=amd64 +ARG GOARM=6 + +RUN set -ex \ + && apk add --no-cache make git file + +WORKDIR $DISTRIBUTION_DIR +COPY . 
$DISTRIBUTION_DIR +RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked" + +FROM alpine +COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml +COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry +VOLUME ["/var/lib/registry"] +EXPOSE 5000 +ENTRYPOINT ["registry"] +CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 0000000000000..e06d2081865a7 --- /dev/null +++ b/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS new file mode 100644 index 0000000000000..3183620c57b95 --- /dev/null +++ b/vendor/github.com/docker/distribution/MAINTAINERS @@ -0,0 +1,243 @@ +# Distribution maintainers file +# +# This file describes who runs the docker/distribution project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# + +[Rules] + + [Rules.maintainers] + + title = "What is a maintainer?" + + text = """ +There are different types of maintainers, with different responsibilities, but +all maintainers have 3 things in common: + +1) They share responsibility in the project's success. +2) They have made a long-term, recurring time investment to improve the project. +3) They spend that time doing whatever needs to be done, not necessarily what +is the most interesting or fun. + +Maintainers are often under-appreciated, because their work is harder to appreciate. +It's easy to appreciate a really cool and technically advanced feature. It's harder +to appreciate the absence of bugs, the slow but steady improvement in stability, +or the reliability of a release process. But those things distinguish a good +project from a great one. +""" + + [Rules.reviewer] + + title = "What is a reviewer?" + + text = """ +A reviewer is a core role within the project. +They share in reviewing issues and pull requests and their LGTM count towards the +required LGTM count to merge a code change into the project. + +Reviewers are part of the organization but do not have write access. +Becoming a reviewer is a core aspect in the journey to becoming a maintainer. +""" + + [Rules.adding-maintainers] + + title = "How are maintainers added?" + + text = """ +Maintainers are first and foremost contributors that have shown they are +committed to the long term success of a project. Contributors wanting to become +maintainers are expected to be deeply involved in contributing code, pull +request review, and triage of issues in the project for more than three months. 
+ +Just contributing does not make you a maintainer, it is about building trust +with the current maintainers of the project and being a person that they can +depend on and trust to make decisions in the best interest of the project. + +Periodically, the existing maintainers curate a list of contributors that have +shown regular activity on the project over the prior months. From this list, +maintainer candidates are selected and proposed on the maintainers mailing list. + +After a candidate has been announced on the maintainers mailing list, the +existing maintainers are given five business days to discuss the candidate, +raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing +list. Only maintainers of the repository that the candidate is proposed for are +allowed to vote. + +If a candidate is approved, a maintainer will contact the candidate to invite +the candidate to open a pull request that adds the contributor to the +MAINTAINERS file. The candidate becomes a maintainer once the pull request is +merged. +""" + + [Rules.stepping-down-policy] + + title = "Stepping down policy" + + text = """ +Life priorities, interests, and passions can change. If you're a maintainer but +feel you must remove yourself from the list, inform other maintainers that you +intend to step down, and if possible, help find someone to pick up your work. +At the very least, ensure your work can be continued where you left off. + +After you've informed other maintainers, create a pull request to remove +yourself from the MAINTAINERS file. +""" + + [Rules.inactive-maintainers] + + title = "Removal of inactive maintainers" + + text = """ +Similar to the procedure for adding new maintainers, existing maintainers can +be removed from the list if they do not show significant activity on the +project. Periodically, the maintainers review the list of maintainers and their +activity over the last three months. + +If a maintainer has shown insufficient activity over this period, a neutral +person will contact the maintainer to ask if they want to continue being +a maintainer. If the maintainer decides to step down as a maintainer, they +open a pull request to be removed from the MAINTAINERS file. + +If the maintainer wants to remain a maintainer, but is unable to perform the +required duties they can be removed with a vote of at least 66% of +the current maintainers. An e-mail is sent to the +mailing list, inviting maintainers of the project to vote. The voting period is +five business days. Issues related to a maintainer's performance should be +discussed with them among the other maintainers so that they are not surprised +by a pull request removing them. +""" + + [Rules.decisions] + + title = "How are decisions made?" + + text = """ +Short answer: EVERYTHING IS A PULL REQUEST. + +distribution is an open-source project with an open design philosophy. This means +that the repository is the source of truth for EVERY aspect of the project, +including its philosophy, design, road map, and APIs. *If it's part of the +project, it's in the repo. If it's in the repo, it's part of the project.* + +As a result, all decisions can be expressed as changes to the repository. An +implementation change is a change to the source code. An API change is a change +to the API specification. A philosophy change is a change to the philosophy +manifesto, and so on. 
+ +All decisions affecting distribution, big and small, follow the same 3 steps: + +* Step 1: Open a pull request. Anyone can do this. + +* Step 2: Discuss the pull request. Anyone can do this. + +* Step 3: Merge or refuse the pull request. Who does this depends on the nature +of the pull request and which areas of the project it affects. +""" + + [Rules.DCO] + + title = "Helping contributors with the DCO" + + text = """ +The [DCO or `Sign your work`]( +https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work) +requirement is not intended as a roadblock or speed bump. + +Some distribution contributors are not as familiar with `git`, or have used a web +based editor, and thus asking them to `git commit --amend -s` is not the best +way forward. + +In this case, maintainers can update the commits based on clause (c) of the DCO. +The most trivial way for a contributor to allow the maintainer to do this, is to +add a DCO signature in a pull requests's comment, or a maintainer can simply +note that the change is sufficiently trivial that it does not substantially +change the existing contribution - i.e., a spelling change. + +When you add someone's DCO, please also add your own to keep a log. +""" + + [Rules."no direct push"] + + title = "I'm a maintainer. Should I make pull requests too?" + + text = """ +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. +""" + + [Rules.tsc] + + title = "Conflict Resolution and technical disputes" + + text = """ +distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters." + """ + + [Rules.meta] + + title = "How is this process changed?" + + text = "Just like everything else: by making a pull request :)" + +# Current project organization +[Org] + + [Org.Maintainers] + people = [ + "dmcgowan", + "dmp42", + "stevvooe", + ] + [Org.Reviewers] + people = [ + "manishtomar", + "caervs", + "davidswu", + "RobbKistler" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.caervs] + Name = "Ryan Abrams" + Email = "rdabrams@gmail.com" + GitHub = "caervs" + + [people.davidswu] + Name = "David Wu" + Email = "dwu7401@gmail.com" + GitHub = "davidswu" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcgstyle.net" + GitHub = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + GitHub = "dmp42" + + [people.manishtomar] + Name = "Manish Tomar" + Email = "manish.tomar@docker.com" + GitHub = "manishtomar" + + [people.RobbKistler] + Name = "Robb Kistler" + Email = "robb.kistler@docker.com" + GitHub = "RobbKistler" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile new file mode 100644 index 0000000000000..4635c6eca8792 --- /dev/null +++ b/vendor/github.com/docker/distribution/Makefile @@ -0,0 +1,102 @@ +# Root directory of the project (absolute path). +ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +# Used to populate version variable in main package. +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) +REVISION=$(shell git rev-parse HEAD)$(shell if ! 
git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) + + +PKG=github.com/docker/distribution + +# Project packages. +PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) +INTEGRATION_PACKAGE=${PKG} +COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) + + +# Project binaries. +COMMANDS=registry digest registry-api-descriptor-template + +# Allow turning off function inlining and variable registerization +ifeq (${DISABLE_OPTIMIZATION},true) + GO_GCFLAGS=-gcflags "-N -l" + VERSION:="$(VERSION)-noopt" +endif + +WHALE = "+" + +# Go files +# +TESTFLAGS_RACE= +GOFILES=$(shell find . -type f -name '*.go') +GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) +GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' + +BINARIES=$(addprefix bin/,$(COMMANDS)) + +# Flags passed to `go test` +TESTFLAGS ?= -v $(TESTFLAGS_RACE) +TESTFLAGS_PARALLEL ?= 8 + +.PHONY: all build binaries check clean test test-race test-full integration coverage +.DEFAULT: all + +all: binaries + +# This only needs to be generated by hand when cutting full releases. +version/version.go: + @echo "$(WHALE) $@" + ./version/version.sh > $@ + +check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") + @echo "$(WHALE) $@" + gometalinter --config .gometalinter.json ./... + +test: ## run tests, except integration test with test.short + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +test-race: ## run tests, except integration test with test.short and race + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +test-full: ## run tests, except integration tests + @echo "$(WHALE) $@" + @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +integration: ## run integration tests + @echo "$(WHALE) $@" + @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} + +coverage: ## generate coverprofiles from the unit tests + @echo "$(WHALE) $@" + @rm -f coverage.txt + @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null + @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \ + go test ${GO_TAGS} ${TESTFLAGS} \ + -cover \ + -coverprofile=profile.out \ + -covermode=atomic $$pkg || exit; \ + if [ -f profile.out ]; then \ + cat profile.out >> coverage.txt; \ + rm profile.out; \ + fi; \ + done ) + +FORCE: + +# Build a binary from a cmd. +bin/%: cmd/% FORCE + @echo "$(WHALE) $@${BINARY_SUFFIX}" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< + +binaries: $(BINARIES) ## build binaries + @echo "$(WHALE) $@" + +build: + @echo "$(WHALE) $@" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) + +clean: ## clean up binaries + @echo "$(WHALE) $@" + @rm -f $(BINARIES) diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md new file mode 100644 index 0000000000000..998878850cde3 --- /dev/null +++ b/vendor/github.com/docker/distribution/README.md @@ -0,0 +1,130 @@ +# Distribution + +The Docker toolset to pack, ship, store, and deliver content. 
+ +This repository's main product is the Docker Registry 2.0 implementation +for storing and distributing Docker images. It supersedes the +[docker/docker-registry](https://github.com/docker/docker-registry) +project with a new API design, focused on security and performance. + + + +[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) +[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) + +This repository contains the following components: + +|**Component** |Description | +|--------------------|-----------------------------------------------------------------------------------------------------| +| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | +| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | +| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec). | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | + +### How does this integrate with Docker engine? + +This project should provide an implementation of the V2 API for use in the [Docker +core project](https://github.com/docker/docker). The API should be embeddable +and simplify the process of securely pulling and pushing content from `docker` +daemons. + +### What are the long term goals of the Distribution project? + +The _Distribution_ project has the further long term goal of providing a +secure tool chain for distributing content. The specifications, APIs and tools +should be as useful with Docker as they are without. + +Our goal is to design a professional grade and extensible content distribution +system that allows users to: + +* Enjoy an efficient, secure and reliable way to store, manage, package and + exchange content +* Hack/roll their own on top of healthy open-source components +* Implement their own home-made solution through good specs and solid + extension mechanisms. + +## More about Registry 2.0 + +The new registry implementation provides the following benefits: + +- faster push and pull +- new, more efficient implementation +- simplified deployment +- pluggable storage backend +- webhook notifications + +For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). + +### Who needs to deploy a registry? + +By default, Docker users pull images from Docker's public registry instance. +[Installing Docker](https://docs.docker.com/engine/installation/) gives users this +ability. Users can also push images to a repository on Docker's public registry, +if they have a [Docker Hub](https://hub.docker.com/) account. + +For some users and even companies, this default behavior is sufficient. For +others, it is not. + +For example, users with their own software products may want to maintain a +registry for private, company images. Also, you may wish to deploy your own +image repository for images used in testing or continuous integration.
For these +use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) +may be the better choice. + +### Migration to Registry 2.0 + +For those who have previously deployed their own registry based on the Registry +1.0 implementation and wish to deploy a Registry 2.0 while retaining images, +data migration is required. A tool to assist with migration efforts has been +created. For more information see [docker/migrator](https://github.com/docker/migrator). + +## Contribute + +Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute +issues, fixes, and patches to this project. If you are contributing code, see +the instructions for [building a development environment](BUILDING.md). + +## Support + +If any issues are encountered while using the _Distribution_ project, several +avenues are available for support: + + + + + + + + + + + + + + + + + + +
+ | **Channel** | **Details** |
+ |-------------------|---------------------------------------------------------------------------|
+ | IRC | #docker-distribution on FreeNode |
+ | Issue Tracker | github.com/docker/distribution/issues |
+ | Google Groups | https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution |
+ | Mailing List | docker@dockerproject.org |
+ + +## License + +This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md new file mode 100644 index 0000000000000..701127afec6e1 --- /dev/null +++ b/vendor/github.com/docker/distribution/ROADMAP.md @@ -0,0 +1,267 @@ +# Roadmap + +The Distribution Project consists of several components, some of which are +still being defined. This document defines the high-level goals of the +project, identifies the current components, and defines the release- +relationship to the Docker Platform. + +* [Distribution Goals](#distribution-goals) +* [Distribution Components](#distribution-components) +* [Project Planning](#project-planning): release-relationship to the Docker Platform. + +This road map is a living document, providing an overview of the goals and +considerations made in respect of the future of the project. + +## Distribution Goals + +- Replace the existing [docker registry](github.com/docker/docker-registry) + implementation as the primary implementation. +- Replace the existing push and pull code in the docker engine with the + distribution package. +- Define a strong data model for distributing docker images +- Provide a flexible distribution tool kit for use in the docker platform +- Unlock new distribution models + +## Distribution Components + +Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming +features and bugfixes for a component will be added to the relevant milestone. If a feature or +bugfix is not part of a milestone, it is currently unscheduled for +implementation. + +* [Registry](#registry) +* [Distribution Package](#distribution-package) + +*** + +### Registry + +The new Docker registry is the main portion of the distribution repository. +Registry 2.0 is the first release of the next-generation registry. This was +primarily focused on implementing the [new registry +API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), +with a focus on security and performance. + +Following from the Distribution project goals above, we have a set of goals +for registry v2 that we would like to follow in the design. New features +should be compared against these goals. + +#### Data Storage and Distribution First + +The registry's first goal is to provide a reliable, consistent storage +location for Docker images. The registry should only provide the minimal +amount of indexing required to fetch image data and no more. + +This means we should be selective in new features and API additions, including +those that may require expensive, ever growing indexes. Requests should be +servable in "constant time". + +#### Content Addressability + +All data objects used in the registry API should be content addressable. +Content identifiers should be secure and verifiable. This provides a secure, +reliable base from which to build more advanced content distribution systems. + +#### Content Agnostic + +In the past, changes to the image format would require large changes in Docker +and the Registry. By decoupling the distribution and image format, we can +allow the formats to progress without having to coordinate between the two. +This means that we should be focused on decoupling Docker from the registry +just as much as decoupling the registry from Docker. 
Such an approach will +allow us to unlock new distribution models that haven't been possible before. + +We can take this further by saying that the new registry should be content +agnostic. The registry provides a model of names, tags, manifests and content +addresses, and that model can be used to work with content. + +#### Simplicity + +The new registry should be closer to a microservice component than its +predecessor. This means it should have a narrower API and a low number of +service dependencies. It should be easy to deploy. + +This means that other solutions should be explored before changing the API or +adding extra dependencies. If functionality is required, can it be added as an +extension or companion service? + +#### Extensibility + +The registry should provide extension points to add functionality, while +keeping its own scope narrow. + +Features like search, indexing, synchronization and registry explorers fall +into this category. No such feature should be added unless we've found it +impossible to do through an extension. + +#### Active Feature Discussions + +The following are feature discussions that are currently active. + +If you don't see your favorite, unimplemented feature, feel free to contact us +via IRC or the mailing list and we can talk about adding it. The goal here is +to make sure that new features go through a rigid design process before +landing in the registry. + +##### Proxying to other Registries + +A _pull-through caching_ mode exists for the registry, but is restricted from +within the docker client to only mirror the official Docker Hub. This functionality +can be expanded when image provenance has been specified and implemented in the +distribution project. + +##### Metadata storage + +Metadata for the registry is currently stored with the manifest and layer data on +the storage backend. While this is a big win for simplicity and reliably maintaining +state, it comes with the cost of consistency and high latency. The mutable registry +metadata operations should be abstracted behind an API which will allow ACID-compliant +storage systems to handle metadata. + +##### Peer to Peer transfer + +Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit + +##### Indexing, Search and Discovery + +The original registry provided some implementation of search for use with +private registries. Support has been elided from V2 since we'd like to +decouple search functionality from the registry. This makes the registry +simpler to deploy, especially in use cases where search is not needed, and +lets us decouple the image format from the registry. + +There are explorations into using the catalog API and notification system to +build external indexes. The current line of thought is that we will define a +common search API to index and query docker images. Such a system could be run +as a companion to a registry or set of registries to power discovery. + +The main issue with search and discovery is that there are so many ways to +accomplish it. There are two aspects to this project. The first is deciding on +how it will be done, including an API definition that can work with changing +data formats. The second is the process of integrating with `docker search`. +We expect that someone will attempt to address the problem with the existing tools +and propose it as a standard search API, or use it to inform a standardization +process. Once this has been explored, we will integrate with the docker client.
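As a rough illustration of the external-index idea, the sketch below polls the existing V2 catalog endpoint (`GET /v2/_catalog`) to enumerate repositories. It is a minimal sketch, not a committed design: it assumes an unauthenticated registry on localhost:5000, and the indexing itself is left as a stub.

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    // catalog mirrors the response body of GET /v2/_catalog.
    type catalog struct {
        Repositories []string `json:"repositories"`
    }

    func main() {
        // Assumes an unauthenticated registry listening on localhost:5000.
        resp, err := http.Get("http://localhost:5000/v2/_catalog?n=100")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var c catalog
        if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
            panic(err)
        }
        // A real indexer would walk tags and manifests per repository and
        // feed them into a search backend; here we only print the names.
        for _, repo := range c.Repositories {
            fmt.Println(repo)
        }
    }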
Please see the following for more detail: + +- https://github.com/docker/distribution/issues/206 + +##### Deletes + +> __NOTE:__ Deletes are a much asked-for feature. Before requesting this +feature or participating in discussion, we ask that you read this section in +full and understand the problems behind deletes. + +While, at first glance, implementing deletion seems simple, there are a number +of mitigating factors that make many solutions not ideal or even pathological in +the context of a registry. The following paragraphs discuss the background and +approaches that could be applied to arrive at a solution. + +The goal of deletes in any system is to remove unused or unneeded data. Only +data requested for deletion should be removed and no other data. Removing +unintended data is worse than _not_ removing data that was requested for +removal, but ideally, both are supported. Generally, according to this rule, we +err on holding data longer than needed, ensuring that it is only removed when +we can be certain that it can be removed. With the current behavior, we opt to +hold onto the data forever, ensuring that data cannot be incorrectly removed. + +To understand the problems with implementing deletes, one must understand the +data model. All registry data is stored in a filesystem layout, implemented on +a "storage driver", effectively a _virtual file system_ (VFS). The storage +system must assume that this VFS layer will be eventually consistent and has +poor read-after-write consistency, since this is the lowest common denominator +among the storage drivers. This is mitigated by writing values in +reverse-dependent order, but makes wider transactional operations unsafe. + +Layered on the VFS model is a content-addressable _directed, acyclic graph_ +(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. +Since the same data can be referenced by multiple manifests, we only store +data once, even if it is in different repositories. Thus, we have a set of +blobs, referenced by tags and manifests. If we want to delete a blob, we need +to be certain that it is no longer referenced by another manifest or tag. When +we delete a manifest, we can also try to delete the referenced blobs. Deciding +whether or not a blob has an active reference is the crux of the problem. + +Conceptually, deleting a manifest and its resources is quite simple. Just find +all the manifests, enumerate the referenced blobs and delete the blobs not in +that set. An astute observer will recognize this as a garbage collection +problem. As with garbage collection in programming languages, this is very +simple when one always has a consistent view. When one adds parallelism and an +inconsistent view of data, it becomes very challenging. + +A simple example can demonstrate this. Let's say we are deleting a manifest +_A_ in one process. We scan the manifest and decide that all the blobs are +ready for deletion. Concurrently, we have another process accepting a new +manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ +is accepted and all the blobs are considered present, so the operation +proceeds. The original process then deletes the referenced blobs, assuming +they were unreferenced. The manifest _B_, which we thought had all of its data +present, can no longer be served by the registry, since the dependent data has +been deleted.
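To make the shape of the problem concrete, here is a minimal mark-and-sweep sketch over this blob/manifest model. `Manifest`, the digest strings, and the in-memory slices are hypothetical stand-ins for illustration, not the registry's actual types, and the result is only trustworthy under a consistent view of the store, which, as the example above shows, the registry cannot assume.

    package main

    import "fmt"

    // Manifest references a set of blobs by digest. A stand-in type for
    // illustration only.
    type Manifest struct {
        Blobs []string
    }

    // markAndSweep returns the blobs not referenced by any manifest. A
    // manifest accepted concurrently (the A/B race above) can reference a
    // blob after the mark phase but before the sweep, so this is only
    // safe when writes are quiesced or otherwise coordinated.
    func markAndSweep(manifests []Manifest, blobs []string) []string {
        marked := make(map[string]bool)
        for _, m := range manifests { // mark: record every live reference
            for _, d := range m.Blobs {
                marked[d] = true
            }
        }
        var garbage []string
        for _, d := range blobs { // sweep: collect unreferenced blobs
            if !marked[d] {
                garbage = append(garbage, d)
            }
        }
        return garbage
    }

    func main() {
        manifests := []Manifest{{Blobs: []string{"sha256:aaa", "sha256:bbb"}}}
        blobs := []string{"sha256:aaa", "sha256:bbb", "sha256:ccc"}
        fmt.Println(markAndSweep(manifests, blobs)) // prints [sha256:ccc]
    }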
+ +Deleting data from the registry safely requires some way to coordinate this +operation. The following approaches are being considered: + +- _Reference Counting_ - Maintain a count of references to each blob. This is + challenging for a number of reasons: (1) maintaining a consistent consensus + of reference counts across a set of Registries and (2) building the initial + list of reference counts for an existing registry. These challenges can be + met with a consensus protocol like Paxos or Raft in the first case and a + necessary but simple scan in the second. +- _Lock the World GC_ - Halt all writes to the data store. Walk the data store + and find all blob references. Delete all unreferenced blobs. This approach + is very simple but requires disabling writes for a period of time while the + service reads all data. This is slow and expensive but very accurate and + effective. +- _Generational GC_ - Do something similar to above but instead of blocking + writes, writes are sent to another storage backend while reads are broadcast + to the new and old backends. GC is then performed on the read-only portion. + Because writes land in the new backend, the data in the read-only section + can be safely deleted. The main drawbacks of this approach are complexity + and coordination. +- _Centralized Oracle_ - Using a centralized, transactional database, we can + know exactly which data is referenced at any given time. This avoids the + coordination problem by managing this data in a single location. We trade + off metadata scalability for simplicity and performance. This is a very good + option for most registry deployments, although it would create a bottleneck for + registry metadata. However, metadata is generally not the main bottleneck + when serving images. + +Please let us know if other solutions exist that we have yet to enumerate. +Note that for any approach, implementation is a massive consideration. For +example, a mark-sweep based solution may seem simple, but the amount of work in +coordination may offset the extra work it might take to build a _Centralized +Oracle_. We'll accept proposals for any solution, but please coordinate with us +before dropping code. + +At this time, we have traded off simplicity and ease of deployment for disk +space. Simplicity and ease of deployment tend to reduce developer involvement, +which is currently the most expensive resource in software engineering. Taking +on any solution for deletes will greatly affect these factors, trading off +very cheap disk space for a complex deployment and operational story. + +Please see the following issues for more detail: + +- https://github.com/docker/distribution/issues/422 +- https://github.com/docker/distribution/issues/461 +- https://github.com/docker/distribution/issues/462 + +### Distribution Package + +At its core, the Distribution Project is a set of Go packages that make up +Distribution Components. At this time, most of these packages make up the +Registry implementation. + +The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. + +For feature additions, please see the Registry section. In the future, we may break out a +separate Roadmap for distribution-specific features that apply to more than +just the registry. + +*** + +### Project Planning + +An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap.
[Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. + diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go new file mode 100644 index 0000000000000..c0e9261be9321 --- /dev/null +++ b/vendor/github.com/docker/distribution/blobs.go @@ -0,0 +1,265 @@ +package distribution + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // ErrBlobExists returned when blob already exists + ErrBlobExists = errors.New("blob exists") + + // ErrBlobDigestUnsupported when blob digest is an unsupported version. + ErrBlobDigestUnsupported = errors.New("unsupported blob digest") + + // ErrBlobUnknown when blob is not found. + ErrBlobUnknown = errors.New("unknown blob") + + // ErrBlobUploadUnknown returned when upload is not found. + ErrBlobUploadUnknown = errors.New("blob upload unknown") + + // ErrBlobInvalidLength returned when the blob has an expected length on + // commit, meaning mismatched with the descriptor or an invalid value. + ErrBlobInvalidLength = errors.New("blob invalid length") +) + +// ErrBlobInvalidDigest returned when digest check fails. +type ErrBlobInvalidDigest struct { + Digest digest.Digest + Reason error +} + +func (err ErrBlobInvalidDigest) Error() string { + return fmt.Sprintf("invalid digest for referenced layer: %v, %v", + err.Digest, err.Reason) +} + +// ErrBlobMounted returned when a blob is mounted from another repository +// instead of initiating an upload session. +type ErrBlobMounted struct { + From reference.Canonical + Descriptor Descriptor +} + +func (err ErrBlobMounted) Error() string { + return fmt.Sprintf("blob mounted from: %v to: %v", + err.From, err.Descriptor) +} + +// Descriptor describes targeted content. Used in conjunction with a blob +// store, a descriptor can be used to fetch, store and target any kind of +// blob. The struct also describes the wire protocol format. Fields should +// only be added but never changed. +type Descriptor struct { + // MediaType describe the type of the content. All text based formats are + // encoded as utf-8. + MediaType string `json:"mediaType,omitempty"` + + // Size in bytes of content. + Size int64 `json:"size,omitempty"` + + // Digest uniquely identifies the content. A byte stream can be verified + // against this digest. + Digest digest.Digest `json:"digest,omitempty"` + + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // This should only be used when referring to a manifest. + Platform *v1.Platform `json:"platform,omitempty"` + + // NOTE: Before adding a field here, please ensure that all + // other options have been exhausted. Much of the type relationships + // depend on the simplicity of this type. +} + +// Descriptor returns the descriptor, to make it satisfy the Describable +// interface. 
Note that implementations of Describable are generally objects +// which can be described, not simply descriptors; this exception is in place +// to make it more convenient to pass actual descriptors to functions that +// expect Describable objects. +func (d Descriptor) Descriptor() Descriptor { + return d +} + +// BlobStatter makes blob descriptors available by digest. The service may +// provide a descriptor of a different digest if the provided digest is not +// canonical. +type BlobStatter interface { + // Stat provides metadata about a blob identified by the digest. If the + // blob is unknown to the describer, ErrBlobUnknown will be returned. + Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) +} + +// BlobDeleter enables deleting blobs from storage. +type BlobDeleter interface { + Delete(ctx context.Context, dgst digest.Digest) error +} + +// BlobEnumerator enables iterating over blobs from storage +type BlobEnumerator interface { + Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error +} + +// BlobDescriptorService manages metadata about a blob by digest. Most +// implementations will not expose such an interface explicitly. Such mappings +// should be maintained by interacting with the BlobIngester. Hence, this is +// left off of BlobService and BlobStore. +type BlobDescriptorService interface { + BlobStatter + + // SetDescriptor assigns the descriptor to the digest. The provided digest and + // the digest in the descriptor must map to identical content but they may + // differ on their algorithm. The descriptor must have the canonical + // digest of the content and the digest algorithm must match the + // annotators canonical algorithm. + // + // Such a facility can be used to map blobs between digest domains, with + // the restriction that the algorithm of the descriptor must match the + // canonical algorithm (ie sha256) of the annotator. + SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error + + // Clear enables descriptors to be unlinked + Clear(ctx context.Context, dgst digest.Digest) error +} + +// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. +type BlobDescriptorServiceFactory interface { + BlobAccessController(svc BlobDescriptorService) BlobDescriptorService +} + +// ReadSeekCloser is the primary reader type for blob data, combining +// io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// BlobProvider describes operations for getting blob data. +type BlobProvider interface { + // Get returns the entire blob identified by digest along with the descriptor. + Get(ctx context.Context, dgst digest.Digest) ([]byte, error) + + // Open provides a ReadSeekCloser to the blob identified by the provided + // descriptor. If the blob is not known to the service, an error will be + // returned. + Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) +} + +// BlobServer can serve blobs via http. +type BlobServer interface { + // ServeBlob attempts to serve the blob, identified by dgst, via http. The + // service may decide to redirect the client elsewhere or serve the data + // directly. + // + // This handler only issues successful responses, such as 2xx or 3xx, + // meaning it serves data or issues a redirect. If the blob is not + // available, an error will be returned and the caller may still issue a + // response. + // + // The implementation may serve the same blob from a different digest + // domain. 
The appropriate headers will be set for the blob, unless they + // have already been set by the caller. + ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error +} + +// BlobIngester ingests blob data. +type BlobIngester interface { + // Put inserts the content p into the blob service, returning a descriptor + // or an error. + Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) + + // Create allocates a new blob writer to add a blob to this service. The + // returned handle can be written to and later resumed using an opaque + // identifier. With this approach, one can Close and Resume a BlobWriter + // multiple times until the BlobWriter is committed or cancelled. + Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) + + // Resume attempts to resume a write to a blob, identified by an id. + Resume(ctx context.Context, id string) (BlobWriter, error) +} + +// BlobCreateOption is a general extensible function argument for blob creation +// methods. A BlobIngester may choose to honor any or none of the given +// BlobCreateOptions, which can be specific to the implementation of the +// BlobIngester receiving them. +// TODO (brianbland): unify this with ManifestServiceOption in the future +type BlobCreateOption interface { + Apply(interface{}) error +} + +// CreateOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type CreateOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + // Stat allows to pass precalculated descriptor to link and return. + // Blob access check will be skipped if set. + Stat *Descriptor + } +} + +// BlobWriter provides a handle for inserting data into a blob store. +// Instances should be obtained from BlobWriteService.Writer and +// BlobWriteService.Resume. If supported by the store, a writer can be +// recovered with the id. +type BlobWriter interface { + io.WriteCloser + io.ReaderFrom + + // Size returns the number of bytes written to this blob. + Size() int64 + + // ID returns the identifier for this writer. The ID can be used with the + // Blob service to later resume the write. + ID() string + + // StartedAt returns the time this blob write was started. + StartedAt() time.Time + + // Commit completes the blob writer process. The content is verified + // against the provided provisional descriptor, which may result in an + // error. Depending on the implementation, written data may be validated + // against the provisional descriptor fields. If MediaType is not present, + // the implementation may reject the commit or assign "application/octet- + // stream" to the blob. The returned descriptor may have a different + // digest depending on the blob store, referred to as the canonical + // descriptor. + Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) + + // Cancel ends the blob write without storing any data and frees any + // associated resources. Any data written thus far will be lost. Cancel + // implementations should allow multiple calls even after a commit that + // result in a no-op. This allows use of Cancel in a defer statement, + // increasing the assurance that it is correctly called. + Cancel(ctx context.Context) error +} + +// BlobService combines the operations to access, read and write blobs. This +// can be used to describe remote blob services. 
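+//
+// As an illustration only (no concrete implementation ships in this
+// file), a caller holding some BlobService implementation svc might
+// round-trip a blob like this:
+//
+//	desc, err := svc.Put(ctx, "application/octet-stream", payload)
+//	if err != nil { ... }
+//	rc, err := svc.Open(ctx, desc.Digest)
+//	defer rc.Close()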
+type BlobService interface { + BlobStatter + BlobProvider + BlobIngester +} + +// BlobStore represent the entire suite of blob related operations. Such an +// implementation can access, read, write, delete and serve blobs. +type BlobStore interface { + BlobService + BlobServer + BlobDeleter +} diff --git a/vendor/github.com/docker/distribution/configuration/configuration.go b/vendor/github.com/docker/distribution/configuration/configuration.go new file mode 100644 index 0000000000000..b347d63b98a22 --- /dev/null +++ b/vendor/github.com/docker/distribution/configuration/configuration.go @@ -0,0 +1,684 @@ +package configuration + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strings" + "time" +) + +// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and +// optionally modified by environment variables. +// +// Note that yaml field names should never include _ characters, since this is the separator used +// in environment variable names. +type Configuration struct { + // Version is the version which defines the format of the rest of the configuration + Version Version `yaml:"version"` + + // Log supports setting various parameters related to the logging + // subsystem. + Log struct { + // AccessLog configures access logging. + AccessLog struct { + // Disabled disables access logging. + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"accesslog,omitempty"` + + // Level is the granularity at which registry operations are logged. + Level Loglevel `yaml:"level,omitempty"` + + // Formatter overrides the default formatter with another. Options + // include "text", "json" and "logstash". + Formatter string `yaml:"formatter,omitempty"` + + // Fields allows users to specify static string fields to include in + // the logger context. + Fields map[string]interface{} `yaml:"fields,omitempty"` + + // Hooks allows users to configure the log hooks, to enabling the + // sequent handling behavior, when defined levels of log message emit. + Hooks []LogHook `yaml:"hooks,omitempty"` + } + + // Loglevel is the level at which registry operations are logged. + // + // Deprecated: Use Log.Level instead. + Loglevel Loglevel `yaml:"loglevel,omitempty"` + + // Storage is the configuration for the registry's storage driver + Storage Storage `yaml:"storage"` + + // Auth allows configuration of various authorization methods that may be + // used to gate requests. + Auth Auth `yaml:"auth,omitempty"` + + // Middleware lists all middlewares to be used by the registry. + Middleware map[string][]Middleware `yaml:"middleware,omitempty"` + + // Reporting is the configuration for error reporting + Reporting Reporting `yaml:"reporting,omitempty"` + + // HTTP contains configuration parameters for the registry's http + // interface. + HTTP struct { + // Addr specifies the bind address for the registry instance. + Addr string `yaml:"addr,omitempty"` + + // Net specifies the net portion of the bind address. A default empty value means tcp. + Net string `yaml:"net,omitempty"` + + // Host specifies an externally-reachable address for the registry, as a fully + // qualified URL. + Host string `yaml:"host,omitempty"` + + Prefix string `yaml:"prefix,omitempty"` + + // Secret specifies the secret key which HMAC tokens are created with. 
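+		// An illustrative http section in YAML (values invented):
+		//
+		//	http:
+		//	  addr: :5000
+		//	  secret: asecretforlocaldev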
+ Secret string `yaml:"secret,omitempty"` + + // RelativeURLs specifies that relative URLs should be returned in + // Location headers + RelativeURLs bool `yaml:"relativeurls,omitempty"` + + // Amount of time to wait for connection to drain before shutting down when registry + // receives a stop signal + DrainTimeout time.Duration `yaml:"draintimeout,omitempty"` + + // TLS instructs the http server to listen with a TLS configuration. + // This only support simple tls configuration with a cert and key. + // Mostly, this is useful for testing situations or simple deployments + // that require tls. If more complex configurations are required, use + // a proxy or make a proposal to add support here. + TLS struct { + // Certificate specifies the path to an x509 certificate file to + // be used for TLS. + Certificate string `yaml:"certificate,omitempty"` + + // Key specifies the path to the x509 key file, which should + // contain the private portion for the file specified in + // Certificate. + Key string `yaml:"key,omitempty"` + + // Specifies the CA certs for client authentication + // A file may contain multiple CA certificates encoded as PEM + ClientCAs []string `yaml:"clientcas,omitempty"` + + // LetsEncrypt is used to configuration setting up TLS through + // Let's Encrypt instead of manually specifying certificate and + // key. If a TLS certificate is specified, the Let's Encrypt + // section will not be used. + LetsEncrypt struct { + // CacheFile specifies cache file to use for lets encrypt + // certificates and keys. + CacheFile string `yaml:"cachefile,omitempty"` + + // Email is the email to use during Let's Encrypt registration + Email string `yaml:"email,omitempty"` + + // Hosts specifies the hosts which are allowed to obtain Let's + // Encrypt certificates. + Hosts []string `yaml:"hosts,omitempty"` + } `yaml:"letsencrypt,omitempty"` + } `yaml:"tls,omitempty"` + + // Headers is a set of headers to include in HTTP responses. A common + // use case for this would be security headers such as + // Strict-Transport-Security. The map keys are the header names, and + // the values are the associated header payloads. + Headers http.Header `yaml:"headers,omitempty"` + + // Debug configures the http debug interface, if specified. This can + // include services such as pprof, expvar and other data that should + // not be exposed externally. Left disabled by default. + Debug struct { + // Addr specifies the bind address for the debug server. + Addr string `yaml:"addr,omitempty"` + // Prometheus configures the Prometheus telemetry endpoint. + Prometheus struct { + Enabled bool `yaml:"enabled,omitempty"` + Path string `yaml:"path,omitempty"` + } `yaml:"prometheus,omitempty"` + } `yaml:"debug,omitempty"` + + // HTTP2 configuration options + HTTP2 struct { + // Specifies whether the registry should disallow clients attempting + // to connect via http2. If set to true, only http/1.1 is supported. + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"http2,omitempty"` + } `yaml:"http,omitempty"` + + // Notifications specifies configuration about various endpoint to which + // registry events are dispatched. + Notifications Notifications `yaml:"notifications,omitempty"` + + // Redis configures the redis pool available to the registry webapp. + Redis struct { + // Addr specifies the the redis instance available to the application. + Addr string `yaml:"addr,omitempty"` + + // Password string to use when making a connection. 
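+		// An illustrative redis section in YAML (values invented):
+		//
+		//	redis:
+		//	  addr: localhost:6379
+		//	  password: asecret
+		//	  db: 0
+		//	  dialtimeout: 10ms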
+ Password string `yaml:"password,omitempty"` + + // DB specifies the database to connect to on the redis instance. + DB int `yaml:"db,omitempty"` + + DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect + ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data + WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data + + // Pool configures the behavior of the redis connection pool. + Pool struct { + // MaxIdle sets the maximum number of idle connections. + MaxIdle int `yaml:"maxidle,omitempty"` + + // MaxActive sets the maximum number of connections that should be + // opened before blocking a connection request. + MaxActive int `yaml:"maxactive,omitempty"` + + // IdleTimeout sets the amount time to wait before closing + // inactive connections. + IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` + } `yaml:"pool,omitempty"` + } `yaml:"redis,omitempty"` + + Health Health `yaml:"health,omitempty"` + + Proxy Proxy `yaml:"proxy,omitempty"` + + // Compatibility is used for configurations of working with older or deprecated features. + Compatibility struct { + // Schema1 configures how schema1 manifests will be handled + Schema1 struct { + // TrustKey is the signing key to use for adding the signature to + // schema1 manifests. + TrustKey string `yaml:"signingkeyfile,omitempty"` + // Enabled determines if schema1 manifests should be pullable + Enabled bool `yaml:"enabled,omitempty"` + } `yaml:"schema1,omitempty"` + } `yaml:"compatibility,omitempty"` + + // Validation configures validation options for the registry. + Validation struct { + // Enabled enables the other options in this section. This field is + // deprecated in favor of Disabled. + Enabled bool `yaml:"enabled,omitempty"` + // Disabled disables the other options in this section. + Disabled bool `yaml:"disabled,omitempty"` + // Manifests configures manifest validation. + Manifests struct { + // URLs configures validation for URLs in pushed manifests. + URLs struct { + // Allow specifies regular expressions (https://godoc.org/regexp/syntax) + // that URLs in pushed manifests must match. + Allow []string `yaml:"allow,omitempty"` + // Deny specifies regular expressions (https://godoc.org/regexp/syntax) + // that URLs in pushed manifests must not match. + Deny []string `yaml:"deny,omitempty"` + } `yaml:"urls,omitempty"` + } `yaml:"manifests,omitempty"` + } `yaml:"validation,omitempty"` + + // Policy configures registry policy options. + Policy struct { + // Repository configures policies for repositories + Repository struct { + // Classes is a list of repository classes which the + // registry allows content for. This class is matched + // against the configuration media type inside uploaded + // manifests. When non-empty, the registry will enforce + // the class in authorized resources. + Classes []string `yaml:"classes"` + } `yaml:"repository,omitempty"` + } `yaml:"policy,omitempty"` +} + +// LogHook is composed of hook Level and Type. +// After hooks configuration, it can execute the next handling automatically, +// when defined levels of log message emitted. +// Example: hook can sending an email notification when error log happens in app. +type LogHook struct { + // Disable lets user select to enable hook or not. + Disabled bool `yaml:"disabled,omitempty"` + + // Type allows user to select which type of hook handler they want. + Type string `yaml:"type,omitempty"` + + // Levels set which levels of log message will let hook executed. 
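+	// For example (hypothetical), a mail hook fired only on panics:
+	//
+	//	log:
+	//	  hooks:
+	//	    - type: mail
+	//	      levels: [panic]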
+ Levels []string `yaml:"levels,omitempty"` + + // MailOptions allows user to configure email parameters. + MailOptions MailOptions `yaml:"options,omitempty"` +} + +// MailOptions provides the configuration sections to user, for specific handler. +type MailOptions struct { + SMTP struct { + // Addr defines smtp host address + Addr string `yaml:"addr,omitempty"` + + // Username defines user name to smtp host + Username string `yaml:"username,omitempty"` + + // Password defines password of login user + Password string `yaml:"password,omitempty"` + + // Insecure defines if smtp login skips the secure certification. + Insecure bool `yaml:"insecure,omitempty"` + } `yaml:"smtp,omitempty"` + + // From defines mail sending address + From string `yaml:"from,omitempty"` + + // To defines mail receiving address + To []string `yaml:"to,omitempty"` +} + +// FileChecker is a type of entry in the health section for checking files. +type FileChecker struct { + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // File is the path to check + File string `yaml:"file,omitempty"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` +} + +// HTTPChecker is a type of entry in the health section for checking HTTP URIs. +type HTTPChecker struct { + // Timeout is the duration to wait before timing out the HTTP request + Timeout time.Duration `yaml:"timeout,omitempty"` + // StatusCode is the expected status code + StatusCode int + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // URI is the HTTP URI to check + URI string `yaml:"uri,omitempty"` + // Headers lists static headers that should be added to all requests + Headers http.Header `yaml:"headers"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` +} + +// TCPChecker is a type of entry in the health section for checking TCP servers. +type TCPChecker struct { + // Timeout is the duration to wait before timing out the TCP connection + Timeout time.Duration `yaml:"timeout,omitempty"` + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // Addr is the TCP address to check + Addr string `yaml:"addr,omitempty"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` +} + +// Health provides the configuration section for health checks. 
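+//
+// An illustrative (not normative) YAML fragment for this section:
+//
+//	health:
+//	  storagedriver:
+//	    enabled: true
+//	    interval: 10s
+//	    threshold: 3
+//	  file:
+//	    - file: /path/to/checked/file
+//	      interval: 10s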
+type Health struct { + // FileCheckers is a list of paths to check + FileCheckers []FileChecker `yaml:"file,omitempty"` + // HTTPCheckers is a list of URIs to check + HTTPCheckers []HTTPChecker `yaml:"http,omitempty"` + // TCPCheckers is a list of URIs to check + TCPCheckers []TCPChecker `yaml:"tcp,omitempty"` + // StorageDriver configures a health check on the configured storage + // driver + StorageDriver struct { + // Enabled turns on the health check for the storage driver + Enabled bool `yaml:"enabled,omitempty"` + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` + } `yaml:"storagedriver,omitempty"` +} + +// v0_1Configuration is a Version 0.1 Configuration struct +// This is currently aliased to Configuration, as it is the current version +type v0_1Configuration Configuration + +// UnmarshalYAML implements the yaml.Unmarshaler interface +// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent unsigned integers +func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var versionString string + err := unmarshal(&versionString) + if err != nil { + return err + } + + newVersion := Version(versionString) + if _, err := newVersion.major(); err != nil { + return err + } + + if _, err := newVersion.minor(); err != nil { + return err + } + + *version = newVersion + return nil +} + +// CurrentVersion is the most recent Version that can be parsed +var CurrentVersion = MajorMinorVersion(0, 1) + +// Loglevel is the level at which operations are logged +// This can be error, warn, info, or debug +type Loglevel string + +// UnmarshalYAML implements the yaml.Umarshaler interface +// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a +// valid loglevel +func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error { + var loglevelString string + err := unmarshal(&loglevelString) + if err != nil { + return err + } + + loglevelString = strings.ToLower(loglevelString) + switch loglevelString { + case "error", "warn", "info", "debug": + default: + return fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString) + } + + *loglevel = Loglevel(loglevelString) + return nil +} + +// Parameters defines a key-value parameters mapping +type Parameters map[string]interface{} + +// Storage defines the configuration for registry object storage +type Storage map[string]Parameters + +// Type returns the storage driver type, such as filesystem or s3 +func (storage Storage) Type() string { + var storageType []string + + // Return only key in this map + for k := range storage { + switch k { + case "maintenance": + // allow configuration of maintenance + case "cache": + // allow configuration of caching + case "delete": + // allow configuration of delete + case "redirect": + // allow configuration of redirect + default: + storageType = append(storageType, k) + } + } + if len(storageType) > 1 { + panic("multiple storage drivers specified in configuration or environment: " + strings.Join(storageType, ", ")) + } + if len(storageType) == 1 { + return storageType[0] + } + return "" +} + +// Parameters returns the Parameters map for a Storage configuration +func (storage Storage) Parameters() Parameters { + return storage[storage.Type()] +} + +// setParameter changes the parameter at 
the provided key to the new value +func (storage Storage) setParameter(key string, value interface{}) { + storage[storage.Type()][key] = value +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface +// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters +func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { + var storageMap map[string]Parameters + err := unmarshal(&storageMap) + if err == nil { + if len(storageMap) > 1 { + types := make([]string, 0, len(storageMap)) + for k := range storageMap { + switch k { + case "maintenance": + // allow for configuration of maintenance + case "cache": + // allow configuration of caching + case "delete": + // allow configuration of delete + case "redirect": + // allow configuration of redirect + default: + types = append(types, k) + } + } + + if len(types) > 1 { + return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types) + } + } + *storage = storageMap + return nil + } + + var storageType string + err = unmarshal(&storageType) + if err == nil { + *storage = Storage{storageType: Parameters{}} + return nil + } + + return err +} + +// MarshalYAML implements the yaml.Marshaler interface +func (storage Storage) MarshalYAML() (interface{}, error) { + if storage.Parameters() == nil { + return storage.Type(), nil + } + return map[string]Parameters(storage), nil +} + +// Auth defines the configuration for registry authorization. +type Auth map[string]Parameters + +// Type returns the auth type, such as htpasswd or token +func (auth Auth) Type() string { + // Return only key in this map + for k := range auth { + return k + } + return "" +} + +// Parameters returns the Parameters map for an Auth configuration +func (auth Auth) Parameters() Parameters { + return auth[auth.Type()] +} + +// setParameter changes the parameter at the provided key to the new value +func (auth Auth) setParameter(key string, value interface{}) { + auth[auth.Type()][key] = value +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface +// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters +func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error { + var m map[string]Parameters + err := unmarshal(&m) + if err == nil { + if len(m) > 1 { + types := make([]string, 0, len(m)) + for k := range m { + types = append(types, k) + } + + // TODO(stevvooe): May want to change this slightly for + // authorization to allow multiple challenges. + return fmt.Errorf("must provide exactly one type. Provided: %v", types) + + } + *auth = m + return nil + } + + var authType string + err = unmarshal(&authType) + if err == nil { + *auth = Auth{authType: Parameters{}} + return nil + } + + return err +} + +// MarshalYAML implements the yaml.Marshaler interface +func (auth Auth) MarshalYAML() (interface{}, error) { + if auth.Parameters() == nil { + return auth.Type(), nil + } + return map[string]Parameters(auth), nil +} + +// Notifications configures multiple http endpoints. +type Notifications struct { + // EventConfig is the configuration for the event format that is sent to each Endpoint. + EventConfig Events `yaml:"events,omitempty"` + // Endpoints is a list of http configurations for endpoints that + // respond to webhook notifications. In the future, we may allow other + // kinds of endpoints, such as external queues. 
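+	// An endpoint list might look like this in YAML (name and URL are
+	// invented for illustration):
+	//
+	//	notifications:
+	//	  endpoints:
+	//	    - name: alistener
+	//	      url: https://mylistener.example.com/event
+	//	      timeout: 500ms
+	//	      threshold: 5
+	//	      backoff: 1s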
+ Endpoints []Endpoint `yaml:"endpoints,omitempty"` +} + +// Endpoint describes the configuration of an http webhook notification +// endpoint. +type Endpoint struct { + Name string `yaml:"name"` // identifies the endpoint in the registry instance. + Disabled bool `yaml:"disabled"` // disables the endpoint + URL string `yaml:"url"` // post url for the endpoint. + Headers http.Header `yaml:"headers"` // static headers that should be added to all requests + Timeout time.Duration `yaml:"timeout"` // HTTP timeout + Threshold int `yaml:"threshold"` // circuit breaker threshold before backing off on failure + Backoff time.Duration `yaml:"backoff"` // backoff duration + IgnoredMediaTypes []string `yaml:"ignoredmediatypes"` // target media types to ignore + Ignore Ignore `yaml:"ignore"` // ignore event types +} + +// Events configures notification events. +type Events struct { + IncludeReferences bool `yaml:"includereferences"` // include reference data in manifest events +} + +//Ignore configures mediaTypes and actions of the event, that it won't be propagated +type Ignore struct { + MediaTypes []string `yaml:"mediatypes"` // target media types to ignore + Actions []string `yaml:"actions"` // ignore action types +} + +// Reporting defines error reporting methods. +type Reporting struct { + // Bugsnag configures error reporting for Bugsnag (bugsnag.com). + Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"` + // NewRelic configures error reporting for NewRelic (newrelic.com) + NewRelic NewRelicReporting `yaml:"newrelic,omitempty"` +} + +// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com). +type BugsnagReporting struct { + // APIKey is the Bugsnag api key. + APIKey string `yaml:"apikey,omitempty"` + // ReleaseStage tracks where the registry is deployed. + // Examples: production, staging, development + ReleaseStage string `yaml:"releasestage,omitempty"` + // Endpoint is used for specifying an enterprise Bugsnag endpoint. + Endpoint string `yaml:"endpoint,omitempty"` +} + +// NewRelicReporting configures error reporting for NewRelic (newrelic.com) +type NewRelicReporting struct { + // LicenseKey is the NewRelic user license key + LicenseKey string `yaml:"licensekey,omitempty"` + // Name is the component name of the registry in NewRelic + Name string `yaml:"name,omitempty"` + // Verbose configures debug output to STDOUT + Verbose bool `yaml:"verbose,omitempty"` +} + +// Middleware configures named middlewares to be applied at injection points. 
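+//
+// Illustrative YAML (the middleware name and options are
+// implementation-defined; "registry" is one of the injection points):
+//
+//	middleware:
+//	  registry:
+//	    - name: somename
+//	      options:
+//	        key: value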
+type Middleware struct { + // Name the middleware registers itself as + Name string `yaml:"name"` + // Flag to disable middleware easily + Disabled bool `yaml:"disabled,omitempty"` + // Map of parameters that will be passed to the middleware's initialization function + Options Parameters `yaml:"options"` +} + +// Proxy configures the registry as a pull through cache +type Proxy struct { + // RemoteURL is the URL of the remote registry + RemoteURL string `yaml:"remoteurl"` + + // Username of the hub user + Username string `yaml:"username"` + + // Password of the hub user + Password string `yaml:"password"` +} + +// Parse parses an input configuration yaml document into a Configuration struct +// This should generally be capable of handling old configuration format versions +// +// Environment variables may be used to override configuration parameters other than version, +// following the scheme below: +// Configuration.Abc may be replaced by the value of REGISTRY_ABC, +// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth +func Parse(rd io.Reader) (*Configuration, error) { + in, err := ioutil.ReadAll(rd) + if err != nil { + return nil, err + } + + p := NewParser("registry", []VersionedParseInfo{ + { + Version: MajorMinorVersion(0, 1), + ParseAs: reflect.TypeOf(v0_1Configuration{}), + ConversionFunc: func(c interface{}) (interface{}, error) { + if v0_1, ok := c.(*v0_1Configuration); ok { + if v0_1.Log.Level == Loglevel("") { + if v0_1.Loglevel != Loglevel("") { + v0_1.Log.Level = v0_1.Loglevel + } else { + v0_1.Log.Level = Loglevel("info") + } + } + if v0_1.Loglevel != Loglevel("") { + v0_1.Loglevel = Loglevel("") + } + if v0_1.Storage.Type() == "" { + return nil, errors.New("No storage configuration provided") + } + return (*Configuration)(v0_1), nil + } + return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) + }, + }, + }) + + config := new(Configuration) + err = p.Parse(in, config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/vendor/github.com/docker/distribution/configuration/parser.go b/vendor/github.com/docker/distribution/configuration/parser.go new file mode 100644 index 0000000000000..b46f7326fd6c4 --- /dev/null +++ b/vendor/github.com/docker/distribution/configuration/parser.go @@ -0,0 +1,283 @@ +package configuration + +import ( + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +// Version is a major/minor version pair of the form Major.Minor +// Major version upgrades indicate structure or type changes +// Minor version upgrades should be strictly additive +type Version string + +// MajorMinorVersion constructs a Version from its Major and Minor components +func MajorMinorVersion(major, minor uint) Version { + return Version(fmt.Sprintf("%d.%d", major, minor)) +} + +func (version Version) major() (uint, error) { + majorPart := strings.Split(string(version), ".")[0] + major, err := strconv.ParseUint(majorPart, 10, 0) + return uint(major), err +} + +// Major returns the major version portion of a Version +func (version Version) Major() uint { + major, _ := version.major() + return major +} + +func (version Version) minor() (uint, error) { + minorPart := strings.Split(string(version), ".")[1] + minor, err := strconv.ParseUint(minorPart, 10, 0) + return uint(minor), err +} + +// Minor returns the minor version portion of a Version +func (version Version) Minor() uint { + minor, _ := version.minor() + return 
minor +} + +// VersionedParseInfo defines how a specific version of a configuration should +// be parsed into the current version +type VersionedParseInfo struct { + // Version is the version which this parsing information relates to + Version Version + // ParseAs defines the type which a configuration file of this version + // should be parsed into + ParseAs reflect.Type + // ConversionFunc defines a method for converting the parsed configuration + // (of type ParseAs) into the current configuration version + // Note: this method signature is very unclear with the absence of generics + ConversionFunc func(interface{}) (interface{}, error) +} + +type envVar struct { + name string + value string +} + +type envVars []envVar + +func (a envVars) Len() int { return len(a) } +func (a envVars) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name } + +// Parser can be used to parse a configuration file and environment of a defined +// version into a unified output structure +type Parser struct { + prefix string + mapping map[Version]VersionedParseInfo + env envVars +} + +// NewParser returns a *Parser with the given environment prefix which handles +// versioned configurations which match the given parseInfos +func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser { + p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo)} + + for _, parseInfo := range parseInfos { + p.mapping[parseInfo.Version] = parseInfo + } + + for _, env := range os.Environ() { + envParts := strings.SplitN(env, "=", 2) + p.env = append(p.env, envVar{envParts[0], envParts[1]}) + } + + // We must sort the environment variables lexically by name so that + // more specific variables are applied before less specific ones + // (i.e. REGISTRY_STORAGE before + // REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY). This sucks, but it's a + // lot simpler and easier to get right than unmarshalling map entries + // into temporaries and merging with the existing entry. + sort.Sort(p.env) + + return &p +} + +// Parse reads in the given []byte and environment and writes the resulting +// configuration into the input v +// +// Environment variables may be used to override configuration parameters other +// than version, following the scheme below: +// v.Abc may be replaced by the value of PREFIX_ABC, +// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth +func (p *Parser) Parse(in []byte, v interface{}) error { + var versionedStruct struct { + Version Version + } + + if err := yaml.Unmarshal(in, &versionedStruct); err != nil { + return err + } + + parseInfo, ok := p.mapping[versionedStruct.Version] + if !ok { + return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) + } + + parseAs := reflect.New(parseInfo.ParseAs) + err := yaml.Unmarshal(in, parseAs.Interface()) + if err != nil { + return err + } + + for _, envVar := range p.env { + pathStr := envVar.name + if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") { + path := strings.Split(pathStr, "_") + + err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value) + if err != nil { + return err + } + } + } + + c, err := parseInfo.ConversionFunc(parseAs.Interface()) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c))) + return nil +} + +// overwriteFields replaces configuration values with alternate values specified +// through the environment. Precondition: an empty path slice must never be +// passed in. 
+func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string, payload string) error { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + panic("encountered nil pointer while handling environment variable " + fullpath) + } + v = reflect.Indirect(v) + } + switch v.Kind() { + case reflect.Struct: + return p.overwriteStruct(v, fullpath, path, payload) + case reflect.Map: + return p.overwriteMap(v, fullpath, path, payload) + case reflect.Interface: + if v.NumMethod() == 0 { + if !v.IsNil() { + return p.overwriteFields(v.Elem(), fullpath, path, payload) + } + // Interface was empty; create an implicit map + var template map[string]interface{} + wrappedV := reflect.MakeMap(reflect.TypeOf(template)) + v.Set(wrappedV) + return p.overwriteMap(wrappedV, fullpath, path, payload) + } + } + return nil +} + +func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error { + // Generate case-insensitive map of struct fields + byUpperCase := make(map[string]int) + for i := 0; i < v.NumField(); i++ { + sf := v.Type().Field(i) + upper := strings.ToUpper(sf.Name) + if _, present := byUpperCase[upper]; present { + panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name)) + } + byUpperCase[upper] = i + } + + fieldIndex, present := byUpperCase[path[0]] + if !present { + logrus.Warnf("Ignoring unrecognized environment variable %s", fullpath) + return nil + } + field := v.Field(fieldIndex) + sf := v.Type().Field(fieldIndex) + + if len(path) == 1 { + // Env var specifies this field directly + fieldVal := reflect.New(sf.Type) + err := yaml.Unmarshal([]byte(payload), fieldVal.Interface()) + if err != nil { + return err + } + field.Set(reflect.Indirect(fieldVal)) + return nil + } + + // If the field is nil, must create an object + switch sf.Type.Kind() { + case reflect.Map: + if field.IsNil() { + field.Set(reflect.MakeMap(sf.Type)) + } + case reflect.Ptr: + if field.IsNil() { + field.Set(reflect.New(sf.Type)) + } + } + + err := p.overwriteFields(field, fullpath, path[1:], payload) + if err != nil { + return err + } + + return nil +} + +func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error { + if m.Type().Key().Kind() != reflect.String { + // non-string keys unsupported + logrus.Warnf("Ignoring environment variable %s involving map with non-string keys", fullpath) + return nil + } + + if len(path) > 1 { + // If a matching key exists, get its value and continue the + // overwriting process. + for _, k := range m.MapKeys() { + if strings.ToUpper(k.String()) == path[0] { + mapValue := m.MapIndex(k) + // If the existing value is nil, we want to + // recreate it instead of using this value. 
+ if (mapValue.Kind() == reflect.Ptr || + mapValue.Kind() == reflect.Interface || + mapValue.Kind() == reflect.Map) && + mapValue.IsNil() { + break + } + return p.overwriteFields(mapValue, fullpath, path[1:], payload) + } + } + } + + // (Re)create this key + var mapValue reflect.Value + if m.Type().Elem().Kind() == reflect.Map { + mapValue = reflect.MakeMap(m.Type().Elem()) + } else { + mapValue = reflect.New(m.Type().Elem()) + } + if len(path) > 1 { + err := p.overwriteFields(mapValue, fullpath, path[1:], payload) + if err != nil { + return err + } + } else { + err := yaml.Unmarshal([]byte(payload), mapValue.Interface()) + if err != nil { + return err + } + } + + m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue)) + + return nil +} diff --git a/vendor/github.com/docker/distribution/context/context.go b/vendor/github.com/docker/distribution/context/context.go new file mode 100644 index 0000000000000..ab6865467b03c --- /dev/null +++ b/vendor/github.com/docker/distribution/context/context.go @@ -0,0 +1,73 @@ +package context + +import ( + "context" + "sync" + + "github.com/docker/distribution/uuid" +) + +// instanceContext is a context that provides only an instance id. It is +// provided as the main background context. +type instanceContext struct { + context.Context + id string // id of context, logged as "instance.id" + once sync.Once // once protect generation of the id +} + +func (ic *instanceContext) Value(key interface{}) interface{} { + if key == "instance.id" { + ic.once.Do(func() { + // We want to lazy initialize the UUID such that we don't + // call a random generator from the package initialization + // code. For various reasons random could not be available + // https://github.com/docker/distribution/issues/782 + ic.id = uuid.Generate().String() + }) + return ic.id + } + + return ic.Context.Value(key) +} + +var background = &instanceContext{ + Context: context.Background(), +} + +// Background returns a non-nil, empty Context. The background context +// provides a single key, "instance.id" that is globally unique to the +// process. +func Background() context.Context { + return background +} + +// stringMapContext is a simple context implementation that checks a map for a +// key, falling back to a parent if not present. +type stringMapContext struct { + context.Context + m map[string]interface{} +} + +// WithValues returns a context that proxies lookups through a map. Only +// supports string keys. +func WithValues(ctx context.Context, m map[string]interface{}) context.Context { + mo := make(map[string]interface{}, len(m)) // make our own copy. + for k, v := range m { + mo[k] = v + } + + return stringMapContext{ + Context: ctx, + m: mo, + } +} + +func (smc stringMapContext) Value(key interface{}) interface{} { + if ks, ok := key.(string); ok { + if v, ok := smc.m[ks]; ok { + return v + } + } + + return smc.Context.Value(key) +} diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go new file mode 100644 index 0000000000000..0c631a9c91351 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/doc.go @@ -0,0 +1,88 @@ +// Package context provides several utilities for working with +// Go's context in http requests. Primarily, the focus is on logging relevant +// request information but this package is not limited to that purpose. 
+// +// The easiest way to get started is to get the background context: +// +// ctx := context.Background() +// +// The returned context should be passed around your application and be the +// root of all other context instances. If the application has a version, this +// line should be called before anything else: +// +// ctx := context.WithVersion(context.Background(), version) +// +// The above will store the version in the context and will be available to +// the logger. +// +// Logging +// +// The most useful aspect of this package is GetLogger. This function takes +// any context.Context interface and returns the current logger from the +// context. Canonical usage looks like this: +// +// GetLogger(ctx).Infof("something interesting happened") +// +// GetLogger also takes optional key arguments. The keys will be looked up in +// the context and reported with the logger. The following example would +// return a logger that prints the version with each log message: +// +// ctx := context.Context(context.Background(), "version", version) +// GetLogger(ctx, "version").Infof("this log message has a version field") +// +// The above would print out a log message like this: +// +// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m +// +// When used with WithLogger, we gain the ability to decorate the context with +// loggers that have information from disparate parts of the call stack. +// Following from the version example, we can build a new context with the +// configured logger such that we always print the version field: +// +// ctx = WithLogger(ctx, GetLogger(ctx, "version")) +// +// Since the logger has been pushed to the context, we can now get the version +// field for free with our log messages. Future calls to GetLogger on the new +// context will have the version field: +// +// GetLogger(ctx).Infof("this log message has a version field") +// +// This becomes more powerful when we start stacking loggers. Let's say we +// have the version logger from above but also want a request id. Using the +// context above, in our request scoped function, we place another logger in +// the context: +// +// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context +// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id")) +// +// When GetLogger is called on the new context, "http.request.id" will be +// included as a logger field, along with the original "version" field: +// +// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m +// +// Note that this only affects the new context, the previous context, with the +// version field, can be used independently. Put another way, the new logger, +// added to the request context, is unique to that context and can have +// request scoped variables. +// +// HTTP Requests +// +// This package also contains several methods for working with http requests. +// The concepts are very similar to those described above. We simply place the +// request in the context using WithRequest. This makes the request variables +// available. 
GetRequestLogger can then be called to get request specific +// variables in a log line: +// +// ctx = WithRequest(ctx, req) +// GetRequestLogger(ctx).Infof("request variables") +// +// Like above, if we want to include the request data in all log messages in +// the context, we push the logger to a new context and use that one: +// +// ctx = WithLogger(ctx, GetRequestLogger(ctx)) +// +// The concept is fairly powerful and ensures that calls throughout the stack +// can be traced in log messages. Using the fields like "http.request.id", one +// can analyze call flow for a particular request with a simple grep of the +// logs. +package context diff --git a/vendor/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go new file mode 100644 index 0000000000000..bc22f0bba13a0 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/http.go @@ -0,0 +1,337 @@ +package context + +import ( + "context" + "errors" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/docker/distribution/uuid" + "github.com/gorilla/mux" + log "github.com/sirupsen/logrus" +) + +// Common errors used with this package. +var ( + ErrNoRequestContext = errors.New("no http request in context") + ErrNoResponseWriterContext = errors.New("no http response in context") +) + +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warnf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. +func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + proxies := strings.Split(prior, ",") + if len(proxies) > 0 { + remoteAddr := strings.Trim(proxies[0], " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} + +// WithRequest places the request on the context. The context of the request +// is assigned a unique id, available at "http.request.id". The request itself +// is available at "http.request". Other common attributes are available under +// the prefix "http.request.". If a request is already present on the context, +// this method will panic. +func WithRequest(ctx context.Context, r *http.Request) context.Context { + if ctx.Value("http.request") != nil { + // NOTE(stevvooe): This needs to be considered a programming error. It + // is unlikely that we'd want to have more than one request in + // context. + panic("only one request per context") + } + + return &httpRequestContext{ + Context: ctx, + startedAt: time.Now(), + id: uuid.Generate().String(), + r: r, + } +} + +// GetRequest returns the http request in the given context. Returns +// ErrNoRequestContext if the context does not have an http request associated +// with it. 
+func GetRequest(ctx context.Context) (*http.Request, error) { + if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { + return r, nil + } + return nil, ErrNoRequestContext +} + +// GetRequestID attempts to resolve the current request id, if possible. An +// error is return if it is not available on the context. +func GetRequestID(ctx context.Context) string { + return GetStringValue(ctx, "http.request.id") +} + +// WithResponseWriter returns a new context and response writer that makes +// interesting response statistics available within the context. +func WithResponseWriter(ctx context.Context, w http.ResponseWriter) (context.Context, http.ResponseWriter) { + irw := instrumentedResponseWriter{ + ResponseWriter: w, + Context: ctx, + } + return &irw, &irw +} + +// GetResponseWriter returns the http.ResponseWriter from the provided +// context. If not present, ErrNoResponseWriterContext is returned. The +// returned instance provides instrumentation in the context. +func GetResponseWriter(ctx context.Context) (http.ResponseWriter, error) { + v := ctx.Value("http.response") + + rw, ok := v.(http.ResponseWriter) + if !ok || rw == nil { + return nil, ErrNoResponseWriterContext + } + + return rw, nil +} + +// getVarsFromRequest let's us change request vars implementation for testing +// and maybe future changes. +var getVarsFromRequest = mux.Vars + +// WithVars extracts gorilla/mux vars and makes them available on the returned +// context. Variables are available at keys with the prefix "vars.". For +// example, if looking for the variable "name", it can be accessed as +// "vars.name". Implementations that are accessing values need not know that +// the underlying context is implemented with gorilla/mux vars. +func WithVars(ctx context.Context, r *http.Request) context.Context { + return &muxVarsContext{ + Context: ctx, + vars: getVarsFromRequest(r), + } +} + +// GetRequestLogger returns a logger that contains fields from the request in +// the current context. If the request is not available in the context, no +// fields will display. Request loggers can safely be pushed onto the context. +func GetRequestLogger(ctx context.Context) Logger { + return GetLogger(ctx, + "http.request.id", + "http.request.method", + "http.request.host", + "http.request.uri", + "http.request.referer", + "http.request.useragent", + "http.request.remoteaddr", + "http.request.contenttype") +} + +// GetResponseLogger reads the current response stats and builds a logger. +// Because the values are read at call time, pushing a logger returned from +// this function on the context will lead to missing or invalid data. Only +// call this at the end of a request, after the response has been written. +func GetResponseLogger(ctx context.Context) Logger { + l := getLogrusLogger(ctx, + "http.response.written", + "http.response.status", + "http.response.contenttype") + + duration := Since(ctx, "http.request.startedat") + + if duration > 0 { + l = l.WithField("http.response.duration", duration.String()) + } + + return l +} + +// httpRequestContext makes information about a request available to context. +type httpRequestContext struct { + context.Context + + startedAt time.Time + id string + r *http.Request +} + +// Value returns a keyed element of the request for use in the context. To get +// the request itself, query "request". For other components, access them as +// "request.". 
For example, r.RequestURI +func (ctx *httpRequestContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.request" { + return ctx.r + } + + if !strings.HasPrefix(keyStr, "http.request.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + switch parts[2] { + case "uri": + return ctx.r.RequestURI + case "remoteaddr": + return RemoteAddr(ctx.r) + case "method": + return ctx.r.Method + case "host": + return ctx.r.Host + case "referer": + referer := ctx.r.Referer() + if referer != "" { + return referer + } + case "useragent": + return ctx.r.UserAgent() + case "id": + return ctx.id + case "startedat": + return ctx.startedAt + case "contenttype": + ct := ctx.r.Header.Get("Content-Type") + if ct != "" { + return ct + } + } + } + +fallback: + return ctx.Context.Value(key) +} + +type muxVarsContext struct { + context.Context + vars map[string]string +} + +func (ctx *muxVarsContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "vars" { + return ctx.vars + } + + if strings.HasPrefix(keyStr, "vars.") { + keyStr = strings.TrimPrefix(keyStr, "vars.") + } + + if v, ok := ctx.vars[keyStr]; ok { + return v + } + } + + return ctx.Context.Value(key) +} + +// instrumentedResponseWriter provides response writer information in a +// context. This variant is only used in the case where CloseNotifier is not +// implemented by the parent ResponseWriter. +type instrumentedResponseWriter struct { + http.ResponseWriter + context.Context + + mu sync.Mutex + status int + written int64 +} + +func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { + n, err = irw.ResponseWriter.Write(p) + + irw.mu.Lock() + irw.written += int64(n) + + // Guess the likely status if not set. + if irw.status == 0 { + irw.status = http.StatusOK + } + + irw.mu.Unlock() + + return +} + +func (irw *instrumentedResponseWriter) WriteHeader(status int) { + irw.ResponseWriter.WriteHeader(status) + + irw.mu.Lock() + irw.status = status + irw.mu.Unlock() +} + +func (irw *instrumentedResponseWriter) Flush() { + if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + + if !strings.HasPrefix(keyStr, "http.response.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + irw.mu.Lock() + defer irw.mu.Unlock() + + switch parts[2] { + case "written": + return irw.written + case "status": + return irw.status + case "contenttype": + contentType := irw.Header().Get("Content-Type") + if contentType != "" { + return contentType + } + } + } + +fallback: + return irw.Context.Value(key) +} diff --git a/vendor/github.com/docker/distribution/context/logger.go b/vendor/github.com/docker/distribution/context/logger.go new file mode 100644 index 0000000000000..3e5b81bbf847b --- /dev/null +++ b/vendor/github.com/docker/distribution/context/logger.go @@ -0,0 +1,121 @@ +package context + +import ( + "context" + "fmt" + "runtime" + + "github.com/sirupsen/logrus" +) + +// Logger provides a leveled-logging interface. 
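+//
+// The helpers in this file produce and consume this interface; a
+// typical pattern (sketch) is to decorate a context once and retrieve
+// the logger wherever the context flows:
+//
+//	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
+//	GetLogger(ctx).Infof("ready")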
+type Logger interface {
+ // standard logger methods
+ Print(args ...interface{})
+ Printf(format string, args ...interface{})
+ Println(args ...interface{})
+
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Fatalln(args ...interface{})
+
+ Panic(args ...interface{})
+ Panicf(format string, args ...interface{})
+ Panicln(args ...interface{})
+
+ // Leveled methods, from logrus
+ Debug(args ...interface{})
+ Debugf(format string, args ...interface{})
+ Debugln(args ...interface{})
+
+ Error(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Errorln(args ...interface{})
+
+ Info(args ...interface{})
+ Infof(format string, args ...interface{})
+ Infoln(args ...interface{})
+
+ Warn(args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warnln(args ...interface{})
+
+ WithError(err error) *logrus.Entry
+}
+
+type loggerKey struct{}
+
+// WithLogger creates a new context with the provided logger.
+func WithLogger(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+// GetLoggerWithField returns a logger instance with the specified field key
+// and value without affecting the context. Extra specified keys will be
+// resolved from the context.
+func GetLoggerWithField(ctx context.Context, key, value interface{}, keys ...interface{}) Logger {
+ return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
+}
+
+// GetLoggerWithFields returns a logger instance with the specified fields
+// without affecting the context. Extra specified keys will be resolved from
+// the context.
+func GetLoggerWithFields(ctx context.Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
+ // must convert from map[interface{}]interface{} to map[string]interface{} for logrus.
+ lfields := make(logrus.Fields, len(fields))
+ for key, value := range fields {
+ lfields[fmt.Sprint(key)] = value
+ }
+
+ return getLogrusLogger(ctx, keys...).WithFields(lfields)
+}
+
+// GetLogger returns the logger from the current context, if present. If one
+// or more keys are provided, they will be resolved on the context and
+// included in the logger. While context.Value takes an interface, any key
+// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
+// a logging key field. If context keys are integer constants, for example,
+// it's recommended that a String method is implemented.
+func GetLogger(ctx context.Context, keys ...interface{}) Logger {
+ return getLogrusLogger(ctx, keys...)
+}
+
+// getLogrusLogger returns the logrus logger for the context. If one or more
+// keys are provided, they will be resolved on the context and included in the
+// logger. Only use this function if specific logrus functionality is
+// required.
+func getLogrusLogger(ctx context.Context, keys ...interface{}) *logrus.Entry {
+ var logger *logrus.Entry
+
+ // Get a logger, if it is present.
+ loggerInterface := ctx.Value(loggerKey{})
+ if loggerInterface != nil {
+ if lgr, ok := loggerInterface.(*logrus.Entry); ok {
+ logger = lgr
+ }
+ }
+
+ if logger == nil {
+ fields := logrus.Fields{}
+
+ // Fill in the instance id, if we have it.
+ instanceID := ctx.Value("instance.id")
+ if instanceID != nil {
+ fields["instance.id"] = instanceID
+ }
+
+ fields["go.version"] = runtime.Version()
+ // If no logger is found, just return the standard logger.
+ logger = logrus.StandardLogger().WithFields(fields) + } + + fields := logrus.Fields{} + for _, key := range keys { + v := ctx.Value(key) + if v != nil { + fields[fmt.Sprint(key)] = v + } + } + + return logger.WithFields(fields) +} diff --git a/vendor/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go new file mode 100644 index 0000000000000..5b88ddaf44e2f --- /dev/null +++ b/vendor/github.com/docker/distribution/context/trace.go @@ -0,0 +1,105 @@ +package context + +import ( + "context" + "runtime" + "time" + + "github.com/docker/distribution/uuid" +) + +// WithTrace allocates a traced timing span in a new context. This allows a +// caller to track the time between calling WithTrace and the returned done +// function. When the done function is called, a log message is emitted with a +// "trace.duration" field, corresponding to the elapsed time and a +// "trace.func" field, corresponding to the function that called WithTrace. +// +// The logging keys "trace.id" and "trace.parent.id" are provided to implement +// dapper-like tracing. This function should be complemented with a WithSpan +// method that could be used for tracing distributed RPC calls. +// +// The main benefit of this function is to post-process log messages or +// intercept them in a hook to provide timing data. Trace ids and parent ids +// can also be linked to provide call tracing, if so required. +// +// Here is an example of the usage: +// +// func timedOperation(ctx Context) { +// ctx, done := WithTrace(ctx) +// defer done("this will be the log message") +// // ... function body ... +// } +// +// If the function ran for roughly 1s, such a usage would emit a log message +// as follows: +// +// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... +// +// Notice that the function name is automatically resolved, along with the +// package and a trace id is emitted that can be linked with parent ids. +func WithTrace(ctx context.Context) (context.Context, func(format string, a ...interface{})) { + if ctx == nil { + ctx = Background() + } + + pc, file, line, _ := runtime.Caller(1) + f := runtime.FuncForPC(pc) + ctx = &traced{ + Context: ctx, + id: uuid.Generate().String(), + start: time.Now(), + parent: GetStringValue(ctx, "trace.id"), + fnname: f.Name(), + file: file, + line: line, + } + + return ctx, func(format string, a ...interface{}) { + GetLogger(ctx, + "trace.duration", + "trace.id", + "trace.parent.id", + "trace.func", + "trace.file", + "trace.line"). + Debugf(format, a...) + } +} + +// traced represents a context that is traced for function call timing. It +// also provides fast lookup for the various attributes that are available on +// the trace. +type traced struct { + context.Context + id string + parent string + start time.Time + fnname string + file string + line int +} + +func (ts *traced) Value(key interface{}) interface{} { + switch key { + case "trace.start": + return ts.start + case "trace.duration": + return time.Since(ts.start) + case "trace.id": + return ts.id + case "trace.parent.id": + if ts.parent == "" { + return nil // must return nil to signal no parent. 
+ }
+
+ return ts.parent
+ case "trace.func":
+ return ts.fnname
+ case "trace.file":
+ return ts.file
+ case "trace.line":
+ return ts.line
+ }
+
+ return ts.Context.Value(key)
+}
diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go
new file mode 100644
index 0000000000000..c462e756329c6
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/util.go
@@ -0,0 +1,25 @@
+package context
+
+import (
+ "context"
+ "time"
+)
+
+// Since looks up key, which should be a time.Time, and returns the duration
+// since that time. If the key is not found, the value returned will be zero.
+// This is helpful when inferring metrics related to context execution times.
+func Since(ctx context.Context, key interface{}) time.Duration {
+ if startedAt, ok := ctx.Value(key).(time.Time); ok {
+ return time.Since(startedAt)
+ }
+ return 0
+}
+
+// GetStringValue returns a string value from the context. The empty string
+// will be returned if not found.
+func GetStringValue(ctx context.Context, key interface{}) (value string) {
+ if valuev, ok := ctx.Value(key).(string); ok {
+ value = valuev
+ }
+ return value
+}
diff --git a/vendor/github.com/docker/distribution/context/version.go b/vendor/github.com/docker/distribution/context/version.go
new file mode 100644
index 0000000000000..97cf9d665f425
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/version.go
@@ -0,0 +1,22 @@
+package context
+
+import "context"
+
+type versionKey struct{}
+
+func (versionKey) String() string { return "version" }
+
+// WithVersion stores the application version in the context. The new context
+// gets a logger to ensure log messages are marked with the application
+// version.
+func WithVersion(ctx context.Context, version string) context.Context {
+ ctx = context.WithValue(ctx, versionKey{}, version)
+ // push a new logger onto the stack
+ return WithLogger(ctx, GetLogger(ctx, versionKey{}))
+}
+
+// GetVersion returns the application version from the context. An empty
+// string may be returned if the version was not set on the context.
+func GetVersion(ctx context.Context) string {
+ return GetStringValue(ctx, versionKey{})
+}
diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go
new file mode 100644
index 0000000000000..71327dca72091
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digestset/set.go
@@ -0,0 +1,247 @@
+package digestset
+
+import (
+ "errors"
+ "sort"
+ "strings"
+ "sync"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+var (
+ // ErrDigestNotFound is used when a matching digest
+ // could not be found in a set.
+ ErrDigestNotFound = errors.New("digest not found")
+
+ // ErrDigestAmbiguous is used when multiple digests
+ // are found in a set. None of the matching digests
+ // should be considered valid matches.
+ ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string representation
+// of the digest, as well as by a short representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected, therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. 
To mitigate collisions, an +// appropriately long short code should be used. +type Set struct { + mutex sync.RWMutex + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality, +// rather whether the second value could match against the first +// value. +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation. +// If no digests could be found ErrDigestNotFound will be returned +// with an empty digest value. If multiple matches are found +// ErrDigestAmbiguous will be returned with an empty digest value. +func (dst *Set) Lookup(d string) (digest.Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg digest.Algorithm + hex string + ) + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. 
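+//
+// Editor's illustration of how Add, Lookup and Remove cooperate (not part of
+// the vendored file; the digest values are hypothetical):
+//
+//   s := NewSet()
+//   _ = s.Add(dgst)            // dgst is a full digest.Digest
+//   d, err := s.Lookup("ab12") // err is ErrDigestNotFound or ErrDigestAmbiguous on failure
+//   _ = s.Remove(d)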
+func (dst *Set) Remove(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []digest.Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]digest.Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. +func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[digest.Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg digest.Algorithm + val string + digest digest.Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go new file mode 100644 index 0000000000000..bdd8cb708e5d9 --- /dev/null +++ b/vendor/github.com/docker/distribution/doc.go @@ -0,0 +1,7 @@ +// Package distribution will define the interfaces for the components of +// docker distribution. The goal is to allow users to reliably package, ship +// and store content related to docker images. +// +// This is currently a work in progress. More details are available in the +// README.md. +package distribution diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go new file mode 100644 index 0000000000000..8e0b788d6c5cb --- /dev/null +++ b/vendor/github.com/docker/distribution/errors.go @@ -0,0 +1,119 @@ +package distribution + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +// ErrAccessDenied is returned when an access to a requested resource is +// denied. 
+var ErrAccessDenied = errors.New("access denied")
+
+// ErrManifestNotModified is returned when a conditional manifest GetByTag
+// returns nil due to the client indicating it has the latest version
+var ErrManifestNotModified = errors.New("manifest not modified")
+
+// ErrUnsupported is returned when an unimplemented or unsupported action is
+// performed
+var ErrUnsupported = errors.New("operation unsupported")
+
+// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
+// manifest but the registry is configured to reject it
+var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")
+
+// ErrTagUnknown is returned if the given tag is not known by the tag service
+type ErrTagUnknown struct {
+ Tag string
+}
+
+func (err ErrTagUnknown) Error() string {
+ return fmt.Sprintf("unknown tag=%s", err.Tag)
+}
+
+// ErrRepositoryUnknown is returned if the named repository is not known by
+// the registry.
+type ErrRepositoryUnknown struct {
+ Name string
+}
+
+func (err ErrRepositoryUnknown) Error() string {
+ return fmt.Sprintf("unknown repository name=%s", err.Name)
+}
+
+// ErrRepositoryNameInvalid should be used to denote an invalid repository
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrRepositoryNameInvalid struct {
+ Name string
+ Reason error
+}
+
+func (err ErrRepositoryNameInvalid) Error() string {
+ return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
+}
+
+// ErrManifestUnknown is returned if the manifest is not known by the
+// registry.
+type ErrManifestUnknown struct {
+ Name string
+ Tag string
+}
+
+func (err ErrManifestUnknown) Error() string {
+ return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
+}
+
+// ErrManifestUnknownRevision is returned when a manifest cannot be found by
+// revision within a repository.
+type ErrManifestUnknownRevision struct {
+ Name string
+ Revision digest.Digest
+}
+
+func (err ErrManifestUnknownRevision) Error() string {
+ return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
+}
+
+// ErrManifestUnverified is returned when the registry is unable to verify
+// the manifest.
+type ErrManifestUnverified struct{}
+
+func (ErrManifestUnverified) Error() string {
+ return "unverified manifest"
+}
+
+// ErrManifestVerification provides a type to collect errors encountered
+// during manifest verification. Currently, it accepts errors of all types,
+// but it may be narrowed to those involving manifest verification.
+type ErrManifestVerification []error
+
+func (errs ErrManifestVerification) Error() string {
+ var parts []string
+ for _, err := range errs {
+ parts = append(parts, err.Error())
+ }
+
+ return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
+}
+
+// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
+type ErrManifestBlobUnknown struct {
+ Digest digest.Digest
+}
+
+func (err ErrManifestBlobUnknown) Error() string {
+ return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
+}
+
+// ErrManifestNameInvalid should be used to denote an invalid manifest
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrManifestNameInvalid struct {
+ Name string
+ Reason error
+}
+
+func (err ErrManifestNameInvalid) Error() string {
+ return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
+}
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
new file mode 100644
index 0000000000000..1816baea1d653
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifests.go
@@ -0,0 +1,125 @@
+package distribution
+
+import (
+ "context"
+ "fmt"
+ "mime"
+
+ "github.com/opencontainers/go-digest"
+)
+
+// Manifest represents a registry object specifying a set of
+// references and an optional target
+type Manifest interface {
+ // References returns a list of objects which make up this manifest.
+ // A reference is anything which can be represented by a
+ // distribution.Descriptor. These can consist of layers, resources or other
+ // manifests.
+ //
+ // While no particular order is required, implementations should return
+ // them from highest to lowest priority. For example, one might want to
+ // return the base layer before the top layer.
+ References() []Descriptor
+
+ // Payload provides the serialized format of the manifest, in addition to
+ // the media type.
+ Payload() (mediaType string, payload []byte, err error)
+}
+
+// ManifestBuilder creates a manifest allowing one to include dependencies.
+// Instances can be obtained from a version-specific manifest package. Manifest
+// specific data is passed into the function which creates the builder.
+type ManifestBuilder interface {
+ // Build creates the manifest from this builder.
+ Build(ctx context.Context) (Manifest, error)
+
+ // References returns a list of objects which have been added to this
+ // builder. The dependencies are returned in the order they were added,
+ // which should be from base to head.
+ References() []Descriptor
+
+ // AppendReference includes the given object in the manifest after any
+ // existing dependencies. If the add fails, such as when adding an
+ // unsupported dependency, an error may be returned.
+ //
+ // The destination of the reference is dependent on the manifest type and
+ // the dependency type.
+ AppendReference(dependency Describable) error
+}
+
+// ManifestService describes operations on image manifests.
+type ManifestService interface {
+ // Exists returns true if the manifest exists.
+ Exists(ctx context.Context, dgst digest.Digest) (bool, error)
+
+ // Get retrieves the manifest specified by the given digest
+ Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
+
+ // Put creates or updates the given manifest returning the manifest digest
+ Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
+
+ // Delete removes the manifest specified by the given digest. Deleting
+ // a manifest that doesn't exist will return ErrManifestNotFound
+ Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// ManifestEnumerator enables iterating over manifests
+type ManifestEnumerator interface {
+ // Enumerate calls ingester for each manifest.
+ Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
+}
+
+// Describable is an interface for descriptors
+type Describable interface {
+ Descriptor() Descriptor
+}
+
+// ManifestMediaTypes returns the supported media types for manifests.
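+//
+// These are the types registered via RegisterManifestSchema below. As an
+// editor's sketch (not part of the vendored file), a client might advertise
+// them when requesting a manifest:
+//
+//   req.Header.Set("Accept", strings.Join(distribution.ManifestMediaTypes(), ", "))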
+func ManifestMediaTypes() (mediaTypes []string) {
+ for t := range mappings {
+ if t != "" {
+ mediaTypes = append(mediaTypes, t)
+ }
+ }
+ return
+}
+
+// UnmarshalFunc implements manifest unmarshalling for a given MediaType
+type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
+
+var mappings = make(map[string]UnmarshalFunc, 0)
+
+// UnmarshalManifest looks up manifest unmarshal functions based on
+// MediaType
+func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
+ // Need to look up by the actual media type, not the raw contents of
+ // the header. Strip semicolons and anything following them.
+ var mediaType string
+ if ctHeader != "" {
+ var err error
+ mediaType, _, err = mime.ParseMediaType(ctHeader)
+ if err != nil {
+ return nil, Descriptor{}, err
+ }
+ }
+
+ unmarshalFunc, ok := mappings[mediaType]
+ if !ok {
+ unmarshalFunc, ok = mappings[""]
+ if !ok {
+ return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
+ }
+ }
+
+ return unmarshalFunc(p)
+}
+
+// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.
+// This should be called from the specific manifest package that implements
+// the schema.
+func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
+ if _, ok := mappings[mediaType]; ok {
+ return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
+ }
+ mappings[mediaType] = u
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/notifications/bridge.go b/vendor/github.com/docker/distribution/notifications/bridge.go
new file mode 100644
index 0000000000000..eb9af41a0aeb2
--- /dev/null
+++ b/vendor/github.com/docker/distribution/notifications/bridge.go
@@ -0,0 +1,234 @@
+package notifications
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/uuid"
+ "github.com/opencontainers/go-digest"
+)
+
+type bridge struct {
+ ub URLBuilder
+ includeReferences bool
+ actor ActorRecord
+ source SourceRecord
+ request RequestRecord
+ sink Sink
+}
+
+var _ Listener = &bridge{}
+
+// URLBuilder defines a subset of url builder to be used by the event listener.
+type URLBuilder interface {
+ BuildManifestURL(name reference.Named) (string, error)
+ BuildBlobURL(ref reference.Canonical) (string, error)
+}
+
+// NewBridge returns a notification listener that writes records to sink,
+// using the actor and source. Any urls populated in the events created by
+// this bridge will be created using the URLBuilder.
+// TODO(stevvooe): Update this to simply take a context.Context object.
+func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink, includeReferences bool) Listener {
+ return &bridge{
+ ub: ub,
+ includeReferences: includeReferences,
+ actor: actor,
+ source: source,
+ request: request,
+ sink: sink,
+ }
+}
+
+// NewRequestRecord builds a RequestRecord for use in NewBridge from an
+// http.Request, associating it with a request id.
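+//
+// Editor's sketch (not part of the vendored file): the id typically comes
+// from the request context populated by the context package:
+//
+//   record := NewRequestRecord(context.GetRequestID(ctx), r)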
+func NewRequestRecord(id string, r *http.Request) RequestRecord { + return RequestRecord{ + ID: id, + Addr: context.RemoteAddr(r), + Host: r.Host, + Method: r.Method, + UserAgent: r.UserAgent(), + } +} + +func (b *bridge) ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error { + manifestEvent, err := b.createManifestEvent(EventActionPush, repo, sm) + if err != nil { + return err + } + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + manifestEvent.Target.Tag = opt.Tag + break + } + } + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error { + manifestEvent, err := b.createManifestEvent(EventActionPull, repo, sm) + if err != nil { + return err + } + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + manifestEvent.Target.Tag = opt.Tag + break + } + } + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) ManifestDeleted(repo reference.Named, dgst digest.Digest) error { + return b.createManifestDeleteEventAndWrite(EventActionDelete, repo, dgst) +} + +func (b *bridge) BlobPushed(repo reference.Named, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPush, repo, desc) +} + +func (b *bridge) BlobPulled(repo reference.Named, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPull, repo, desc) +} + +func (b *bridge) BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error { + event, err := b.createBlobEvent(EventActionMount, repo, desc) + if err != nil { + return err + } + event.Target.FromRepository = fromRepo.Name() + return b.sink.Write(*event) +} + +func (b *bridge) BlobDeleted(repo reference.Named, dgst digest.Digest) error { + return b.createBlobDeleteEventAndWrite(EventActionDelete, repo, dgst) +} + +func (b *bridge) TagDeleted(repo reference.Named, tag string) error { + event := b.createEvent(EventActionDelete) + event.Target.Repository = repo.Name() + event.Target.Tag = tag + + return b.sink.Write(*event) +} + +func (b *bridge) RepoDeleted(repo reference.Named) error { + event := b.createEvent(EventActionDelete) + event.Target.Repository = repo.Name() + + return b.sink.Write(*event) +} + +func (b *bridge) createManifestEventAndWrite(action string, repo reference.Named, sm distribution.Manifest) error { + manifestEvent, err := b.createManifestEvent(action, repo, sm) + if err != nil { + return err + } + + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) createManifestDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error { + event := b.createEvent(action) + event.Target.Repository = repo.Name() + event.Target.Digest = dgst + + return b.sink.Write(*event) +} + +func (b *bridge) createManifestEvent(action string, repo reference.Named, sm distribution.Manifest) (*Event, error) { + event := b.createEvent(action) + event.Target.Repository = repo.Name() + + mt, p, err := sm.Payload() + if err != nil { + return nil, err + } + + // Ensure we have the canonical manifest descriptor here + manifest, desc, err := distribution.UnmarshalManifest(mt, p) + if err != nil { + return nil, err + } + + event.Target.MediaType = mt + event.Target.Length = desc.Size + event.Target.Size = desc.Size + event.Target.Digest = desc.Digest + if b.includeReferences { + event.Target.References = 
append(event.Target.References, manifest.References()...)
+ }
+
+ ref, err := reference.WithDigest(repo, event.Target.Digest)
+ if err != nil {
+ return nil, err
+ }
+
+ event.Target.URL, err = b.ub.BuildManifestURL(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ return event, nil
+}
+
+func (b *bridge) createBlobDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error {
+ event := b.createEvent(action)
+ event.Target.Digest = dgst
+ event.Target.Repository = repo.Name()
+
+ return b.sink.Write(*event)
+}
+
+func (b *bridge) createBlobEventAndWrite(action string, repo reference.Named, desc distribution.Descriptor) error {
+ event, err := b.createBlobEvent(action, repo, desc)
+ if err != nil {
+ return err
+ }
+
+ return b.sink.Write(*event)
+}
+
+func (b *bridge) createBlobEvent(action string, repo reference.Named, desc distribution.Descriptor) (*Event, error) {
+ event := b.createEvent(action)
+ event.Target.Descriptor = desc
+ event.Target.Length = desc.Size
+ event.Target.Repository = repo.Name()
+
+ ref, err := reference.WithDigest(repo, desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+
+ event.Target.URL, err = b.ub.BuildBlobURL(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ return event, nil
+}
+
+// createEvent creates an event with actor and source populated.
+func (b *bridge) createEvent(action string) *Event {
+ event := createEvent(action)
+ event.Source = b.source
+ event.Actor = b.actor
+ event.Request = b.request
+
+ return event
+}
+
+// createEvent returns a new event, timestamped, with the specified action.
+func createEvent(action string) *Event {
+ return &Event{
+ ID: uuid.Generate().String(),
+ Timestamp: time.Now(),
+ Action: action,
+ }
+}
diff --git a/vendor/github.com/docker/distribution/notifications/endpoint.go b/vendor/github.com/docker/distribution/notifications/endpoint.go
new file mode 100644
index 0000000000000..a8a52d0c91013
--- /dev/null
+++ b/vendor/github.com/docker/distribution/notifications/endpoint.go
@@ -0,0 +1,97 @@
+package notifications
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/docker/distribution/configuration"
+)
+
+// EndpointConfig covers the optional configuration parameters for an active
+// endpoint.
+type EndpointConfig struct {
+ Headers http.Header
+ Timeout time.Duration
+ Threshold int
+ Backoff time.Duration
+ IgnoredMediaTypes []string
+ Transport *http.Transport `json:"-"`
+ Ignore configuration.Ignore
+}
+
+// defaults sets any zero-valued fields to reasonable defaults.
+func (ec *EndpointConfig) defaults() {
+ if ec.Timeout <= 0 {
+ ec.Timeout = time.Second
+ }
+
+ if ec.Threshold <= 0 {
+ ec.Threshold = 10
+ }
+
+ if ec.Backoff <= 0 {
+ ec.Backoff = time.Second
+ }
+
+ if ec.Transport == nil {
+ ec.Transport = http.DefaultTransport.(*http.Transport)
+ }
+}
+
+// Endpoint is a reliable, queued, thread-safe sink that notifies external http
+// services when events are written. Writes are non-blocking and always
+// succeed for callers but events may be queued internally.
+type Endpoint struct {
+ Sink
+ url string
+ name string
+
+ EndpointConfig
+
+ metrics *safeMetrics
+}
+
+// NewEndpoint returns a running endpoint, ready to receive events.
+func NewEndpoint(name, url string, config EndpointConfig) *Endpoint {
+ var endpoint Endpoint
+ endpoint.name = name
+ endpoint.url = url
+ endpoint.EndpointConfig = config
+ endpoint.defaults()
+ endpoint.metrics = newSafeMetrics()
+
+ // Configures the inmemory queue, retry, http pipeline.
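+ // (Editor's note: the wrapping below runs back to front. An event written
+ // to the endpoint first passes the ignore filter, then the in-memory
+ // queue, then the retry layer, and finally the http sink that delivers it.)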
+ endpoint.Sink = newHTTPSink(
+ endpoint.url, endpoint.Timeout, endpoint.Headers,
+ endpoint.Transport, endpoint.metrics.httpStatusListener())
+ endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff)
+ endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener())
+ mediaTypes := append(config.Ignore.MediaTypes, config.IgnoredMediaTypes...)
+ endpoint.Sink = newIgnoredSink(endpoint.Sink, mediaTypes, config.Ignore.Actions)
+
+ register(&endpoint)
+ return &endpoint
+}
+
+// Name returns the name of the endpoint, generally used for debugging.
+func (e *Endpoint) Name() string {
+ return e.name
+}
+
+// URL returns the url of the endpoint.
+func (e *Endpoint) URL() string {
+ return e.url
+}
+
+// ReadMetrics populates em with metrics from the endpoint.
+func (e *Endpoint) ReadMetrics(em *EndpointMetrics) {
+ e.metrics.Lock()
+ defer e.metrics.Unlock()
+
+ *em = e.metrics.EndpointMetrics
+ // The map still needs to be copied in a threadsafe manner.
+ em.Statuses = make(map[string]int)
+ for k, v := range e.metrics.Statuses {
+ em.Statuses[k] = v
+ }
+}
diff --git a/vendor/github.com/docker/distribution/notifications/event.go b/vendor/github.com/docker/distribution/notifications/event.go
new file mode 100644
index 0000000000000..54940a46f51d8
--- /dev/null
+++ b/vendor/github.com/docker/distribution/notifications/event.go
@@ -0,0 +1,163 @@
+package notifications
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/docker/distribution"
+)
+
+// EventAction constants used in action field of Event.
+const (
+ EventActionPull = "pull"
+ EventActionPush = "push"
+ EventActionMount = "mount"
+ EventActionDelete = "delete"
+)
+
+const (
+ // EventsMediaType is the mediatype for the json event envelope. If the
+ // Event, ActorRecord, SourceRecord or Envelope structs change, the version
+ // number should be incremented.
+ EventsMediaType = "application/vnd.docker.distribution.events.v1+json"
+ // layerMediaType is the media type for image rootfs diffs (aka "layers")
+ // used by Docker. We don't expect this to change for quite a while.
+ layerMediaType = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
+)
+
+// Envelope defines the fields of a json event envelope message that can hold
+// one or more events.
+type Envelope struct {
+ // Events make up the contents of the envelope. Events present in a single
+ // envelope are not necessarily related.
+ Events []Event `json:"events,omitempty"`
+}
+
+// TODO(stevvooe): The event type should be separate from the json format. It
+// should be defined as an interface. Leaving as is for now since we don't
+// need that at this time. If we make this change, the struct below would be
+// called "EventRecord".
+
+// Event provides the fields required to describe a registry event.
+type Event struct {
+ // ID provides a unique identifier for the event.
+ ID string `json:"id,omitempty"`
+
+ // Timestamp is the time at which the event occurred.
+ Timestamp time.Time `json:"timestamp,omitempty"`
+
+ // Action indicates what action encompasses the provided event.
+ Action string `json:"action,omitempty"`
+
+ // Target uniquely describes the target of the event.
+ Target struct {
+ // TODO(stevvooe): Use http.DetectContentType for layers, maybe.
+
+ distribution.Descriptor
+
+ // Length in bytes of content. Same as Size field in Descriptor.
+ // Provided for backwards compatibility.
+ Length int64 `json:"length,omitempty"`
+
+ // Repository identifies the named repository.
+ Repository string `json:"repository,omitempty"` + + // FromRepository identifies the named repository which a blob was mounted + // from if appropriate. + FromRepository string `json:"fromRepository,omitempty"` + + // URL provides a direct link to the content. + URL string `json:"url,omitempty"` + + // Tag provides the tag + Tag string `json:"tag,omitempty"` + + // References provides the references descriptors. + References []distribution.Descriptor `json:"references,omitempty"` + } `json:"target,omitempty"` + + // Request covers the request that generated the event. + Request RequestRecord `json:"request,omitempty"` + + // Actor specifies the agent that initiated the event. For most + // situations, this could be from the authorization context of the request. + Actor ActorRecord `json:"actor,omitempty"` + + // Source identifies the registry node that generated the event. Put + // differently, while the actor "initiates" the event, the source + // "generates" it. + Source SourceRecord `json:"source,omitempty"` +} + +// ActorRecord specifies the agent that initiated the event. For most +// situations, this could be from the authorization context of the request. +// Data in this record can refer to both the initiating client and the +// generating request. +type ActorRecord struct { + // Name corresponds to the subject or username associated with the + // request context that generated the event. + Name string `json:"name,omitempty"` + + // TODO(stevvooe): Look into setting a session cookie to get this + // without docker daemon. + // SessionID + + // TODO(stevvooe): Push the "Docker-Command" header to replace cookie and + // get the actual command. + // Command +} + +// RequestRecord covers the request that generated the event. +type RequestRecord struct { + // ID uniquely identifies the request that initiated the event. + ID string `json:"id"` + + // Addr contains the ip or hostname and possibly port of the client + // connection that initiated the event. This is the RemoteAddr from + // the standard http request. + Addr string `json:"addr,omitempty"` + + // Host is the externally accessible host name of the registry instance, + // as specified by the http host header on incoming requests. + Host string `json:"host,omitempty"` + + // Method has the request method that generated the event. + Method string `json:"method"` + + // UserAgent contains the user agent header of the request. + UserAgent string `json:"useragent"` +} + +// SourceRecord identifies the registry node that generated the event. Put +// differently, while the actor "initiates" the event, the source "generates" +// it. +type SourceRecord struct { + // Addr contains the ip or hostname and the port of the registry node + // that generated the event. Generally, this will be resolved by + // os.Hostname() along with the running port. + Addr string `json:"addr,omitempty"` + + // InstanceID identifies a running instance of an application. Changes + // after each restart. + InstanceID string `json:"instanceID,omitempty"` +} + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("sink: closed") +) + +// Sink accepts and sends events. +type Sink interface { + // Write writes one or more events to the sink. If no error is returned, + // the caller will assume that all events have been committed and will not + // try to send them again. 
If an error is received, the caller may retry
+ // sending the event. The caller should cede the slice of memory to the
+ // sink and not modify it after calling this method.
+ Write(events ...Event) error
+
+ // Close the sink, possibly waiting for pending events to flush.
+ Close() error
+}
diff --git a/vendor/github.com/docker/distribution/notifications/http.go b/vendor/github.com/docker/distribution/notifications/http.go
new file mode 100644
index 0000000000000..15751619b30b7
--- /dev/null
+++ b/vendor/github.com/docker/distribution/notifications/http.go
@@ -0,0 +1,150 @@
+package notifications
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "sync"
+ "time"
+)
+
+// httpSink implements a single-flight, http notification endpoint. This is
+// very lightweight in that it only makes an attempt at an http request.
+// Reliability should be provided by the caller.
+type httpSink struct {
+ url string
+
+ mu sync.Mutex
+ closed bool
+ client *http.Client
+ listeners []httpStatusListener
+
+ // TODO(stevvooe): Allow one to configure the media type accepted by this
+ // sink and choose the serialization based on that.
+}
+
+// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other
+// sinks for increased reliability.
+func newHTTPSink(u string, timeout time.Duration, headers http.Header, transport *http.Transport, listeners ...httpStatusListener) *httpSink {
+ if transport == nil {
+ transport = http.DefaultTransport.(*http.Transport)
+ }
+ return &httpSink{
+ url: u,
+ listeners: listeners,
+ client: &http.Client{
+ Transport: &headerRoundTripper{
+ Transport: transport,
+ headers: headers,
+ },
+ Timeout: timeout,
+ },
+ }
+}
+
+// httpStatusListener is called on various outcomes of sending notifications.
+type httpStatusListener interface {
+ success(status int, events ...Event)
+ failure(status int, events ...Event)
+ err(err error, events ...Event)
+}
+
+// Write makes an attempt to notify the endpoint, returning an error if it
+// fails. It is the caller's responsibility to retry on error. The events are
+// accepted or rejected as a group.
+func (hs *httpSink) Write(events ...Event) error {
+ hs.mu.Lock()
+ defer hs.mu.Unlock()
+ defer hs.client.Transport.(*headerRoundTripper).CloseIdleConnections()
+
+ if hs.closed {
+ return ErrSinkClosed
+ }
+
+ envelope := Envelope{
+ Events: events,
+ }
+
+ // TODO(stevvooe): It is not ideal to keep re-encoding the request body on
+ // retry but we are going to do it to keep the code simple. It is likely
+ // we could change the event struct to manage its own buffer.
+
+ p, err := json.MarshalIndent(envelope, "", " ")
+ if err != nil {
+ for _, listener := range hs.listeners {
+ listener.err(err, events...)
+ }
+ return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err)
+ }
+
+ body := bytes.NewReader(p)
+ resp, err := hs.client.Post(hs.url, EventsMediaType, body)
+ if err != nil {
+ for _, listener := range hs.listeners {
+ listener.err(err, events...)
+ }
+
+ return fmt.Errorf("%v: error posting: %v", hs, err)
+ }
+ defer resp.Body.Close()
+
+ // The notifier will treat any 2xx or 3xx response as accepted by the
+ // endpoint.
+ switch {
+ case resp.StatusCode >= 200 && resp.StatusCode < 400:
+ for _, listener := range hs.listeners {
+ listener.success(resp.StatusCode, events...)
+ }
+
+ // TODO(stevvooe): This is a little too accepting: we may want to support
+ // unsupported media type responses with retries using the correct
+ // media type. There may also be cases that will never work.
+ + return nil + default: + for _, listener := range hs.listeners { + listener.failure(resp.StatusCode, events...) + } + return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) + } +} + +// Close the endpoint +func (hs *httpSink) Close() error { + hs.mu.Lock() + defer hs.mu.Unlock() + + if hs.closed { + return fmt.Errorf("httpsink: already closed") + } + + hs.closed = true + return nil +} + +func (hs *httpSink) String() string { + return fmt.Sprintf("httpSink{%s}", hs.url) +} + +type headerRoundTripper struct { + *http.Transport // must be transport to support CancelRequest + headers http.Header +} + +func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + var nreq http.Request + nreq = *req + nreq.Header = make(http.Header) + + merge := func(headers http.Header) { + for k, v := range headers { + nreq.Header[k] = append(nreq.Header[k], v...) + } + } + + merge(req.Header) + merge(hrt.headers) + + return hrt.Transport.RoundTrip(&nreq) +} diff --git a/vendor/github.com/docker/distribution/notifications/listener.go b/vendor/github.com/docker/distribution/notifications/listener.go new file mode 100644 index 0000000000000..98ad8da9fb9bd --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/listener.go @@ -0,0 +1,263 @@ +package notifications + +import ( + "context" + "net/http" + + "github.com/docker/distribution" + + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" +) + +// ManifestListener describes a set of methods for listening to events related to manifests. +type ManifestListener interface { + ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error + ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error + ManifestDeleted(repo reference.Named, dgst digest.Digest) error +} + +// BlobListener describes a listener that can respond to layer related events. +type BlobListener interface { + BlobPushed(repo reference.Named, desc distribution.Descriptor) error + BlobPulled(repo reference.Named, desc distribution.Descriptor) error + BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error + BlobDeleted(repo reference.Named, desc digest.Digest) error +} + +// RepoListener provides repository methods that respond to repository lifecycle +type RepoListener interface { + TagDeleted(repo reference.Named, tag string) error + RepoDeleted(repo reference.Named) error +} + +// Listener combines all repository events into a single interface. +type Listener interface { + ManifestListener + BlobListener + RepoListener +} + +type repositoryListener struct { + distribution.Repository + listener Listener +} + +type removerListener struct { + distribution.RepositoryRemover + listener Listener +} + +// Listen dispatches events on the repository to the listener. 
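+//
+// Wiring sketch (editor's illustration, not part of the vendored file):
+//
+//   listener := NewBridge(ub, source, actor, request, sink, false)
+//   repo, remover = Listen(repo, remover, listener)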
+func Listen(repo distribution.Repository, remover distribution.RepositoryRemover, listener Listener) (distribution.Repository, distribution.RepositoryRemover) { + return &repositoryListener{ + Repository: repo, + listener: listener, + }, &removerListener{ + RepositoryRemover: remover, + listener: listener, + } +} + +func (nl *removerListener) Remove(ctx context.Context, name reference.Named) error { + err := nl.RepositoryRemover.Remove(ctx, name) + if err != nil { + return err + } + return nl.listener.RepoDeleted(name) +} + +func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifests, err := rl.Repository.Manifests(ctx, options...) + if err != nil { + return nil, err + } + return &manifestServiceListener{ + ManifestService: manifests, + parent: rl, + }, nil +} + +func (rl *repositoryListener) Blobs(ctx context.Context) distribution.BlobStore { + return &blobServiceListener{ + BlobStore: rl.Repository.Blobs(ctx), + parent: rl, + } +} + +type manifestServiceListener struct { + distribution.ManifestService + parent *repositoryListener +} + +func (msl *manifestServiceListener) Delete(ctx context.Context, dgst digest.Digest) error { + err := msl.ManifestService.Delete(ctx, dgst) + if err == nil { + if err := msl.parent.listener.ManifestDeleted(msl.parent.Repository.Named(), dgst); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching manifest delete to listener: %v", err) + } + } + + return err +} + +func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + sm, err := msl.ManifestService.Get(ctx, dgst, options...) + if err == nil { + if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Named(), sm, options...); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching manifest pull to listener: %v", err) + } + } + + return sm, err +} + +func (msl *manifestServiceListener) Put(ctx context.Context, sm distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + dgst, err := msl.ManifestService.Put(ctx, sm, options...) 
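+ // (Editor's note: events are dispatched only after the underlying Put
+ // succeeds; a listener failure is logged but does not fail the push.)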
+ + if err == nil { + if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Named(), sm, options...); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching manifest push to listener: %v", err) + } + } + + return dgst, err +} + +type blobServiceListener struct { + distribution.BlobStore + parent *repositoryListener +} + +var _ distribution.BlobStore = &blobServiceListener{} + +func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + p, err := bsl.BlobStore.Get(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return p, err +} + +func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + rc, err := bsl.BlobStore.Open(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return rc, err +} + +func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return err +} + +func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + desc, err := bsl.BlobStore.Put(ctx, mediaType, p) + if err == nil { + if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Named(), desc); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err) + } + } + + return desc, err +} + +func (bsl *blobServiceListener) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Create(ctx, options...) 
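+ // (Editor's note: Create can report a cross-repository mount through
+ // distribution.ErrBlobMounted instead of returning a writer; that case is
+ // surfaced as a mount notification below.)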
+ switch err := err.(type) {
+ case distribution.ErrBlobMounted:
+ if err := bsl.parent.listener.BlobMounted(bsl.parent.Repository.Named(), err.Descriptor, err.From); err != nil {
+ dcontext.GetLogger(ctx).Errorf("error dispatching blob mount to listener: %v", err)
+ }
+ return nil, err
+ }
+ return bsl.decorateWriter(wr), err
+}
+
+func (bsl *blobServiceListener) Delete(ctx context.Context, dgst digest.Digest) error {
+ err := bsl.BlobStore.Delete(ctx, dgst)
+ if err == nil {
+ if err := bsl.parent.listener.BlobDeleted(bsl.parent.Repository.Named(), dgst); err != nil {
+ dcontext.GetLogger(ctx).Errorf("error dispatching layer delete to listener: %v", err)
+ }
+ }
+
+ return err
+}
+
+func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
+ wr, err := bsl.BlobStore.Resume(ctx, id)
+ return bsl.decorateWriter(wr), err
+}
+
+func (bsl *blobServiceListener) decorateWriter(wr distribution.BlobWriter) distribution.BlobWriter {
+ return &blobWriterListener{
+ BlobWriter: wr,
+ parent: bsl,
+ }
+}
+
+type blobWriterListener struct {
+ distribution.BlobWriter
+ parent *blobServiceListener
+}
+
+func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+ committed, err := bwl.BlobWriter.Commit(ctx, desc)
+ if err == nil {
+ if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Named(), committed); err != nil {
+ dcontext.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err)
+ }
+ }
+
+ return committed, err
+}
+
+type tagServiceListener struct {
+ distribution.TagService
+ parent *repositoryListener
+}
+
+func (rl *repositoryListener) Tags(ctx context.Context) distribution.TagService {
+ return &tagServiceListener{
+ TagService: rl.Repository.Tags(ctx),
+ parent: rl,
+ }
+}
+
+func (tagSL *tagServiceListener) Untag(ctx context.Context, tag string) error {
+ if err := tagSL.TagService.Untag(ctx, tag); err != nil {
+ return err
+ }
+ if err := tagSL.parent.listener.TagDeleted(tagSL.parent.Repository.Named(), tag); err != nil {
+ dcontext.GetLogger(ctx).Errorf("error dispatching tag deleted to listener: %v", err)
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/notifications/metrics.go b/vendor/github.com/docker/distribution/notifications/metrics.go
new file mode 100644
index 0000000000000..a20af168733c3
--- /dev/null
+++ b/vendor/github.com/docker/distribution/notifications/metrics.go
@@ -0,0 +1,152 @@
+package notifications
+
+import (
+ "expvar"
+ "fmt"
+ "net/http"
+ "sync"
+)
+
+// EndpointMetrics track various actions taken by the endpoint, typically by
+// number of events. The goal of this is to export it via expvar, but we may
+// find some other future solution to be better.
+type EndpointMetrics struct {
+ Pending int // events pending in queue
+ Events int // total events incoming
+ Successes int // total events written successfully
+ Failures int // total events failed
+ Errors int // total events errored
+ Statuses map[string]int // status code histogram, per call event
+}
+
+// safeMetrics guards the metrics implementation with a lock and provides a
+// safe update function.
+type safeMetrics struct {
+ EndpointMetrics
+ sync.Mutex // protects statuses map
+}
+
+// newSafeMetrics returns safeMetrics with map allocated.
+func newSafeMetrics() *safeMetrics { + var sm safeMetrics + sm.Statuses = make(map[string]int) + return &sm +} + +// httpStatusListener returns the listener for the http sink that updates the +// relevant counters. +func (sm *safeMetrics) httpStatusListener() httpStatusListener { + return &endpointMetricsHTTPStatusListener{ + safeMetrics: sm, + } +} + +// eventQueueListener returns a listener that maintains queue related counters. +func (sm *safeMetrics) eventQueueListener() eventQueueListener { + return &endpointMetricsEventQueueListener{ + safeMetrics: sm, + } +} + +// endpointMetricsHTTPStatusListener increments counters related to http sinks +// for the relevant events. +type endpointMetricsHTTPStatusListener struct { + *safeMetrics +} + +var _ httpStatusListener = &endpointMetricsHTTPStatusListener{} + +func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Successes += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Failures += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Errors += len(events) +} + +// endpointMetricsEventQueueListener maintains the incoming events counter and +// the queues pending count. +type endpointMetricsEventQueueListener struct { + *safeMetrics +} + +func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Events += len(events) + eqc.Pending += len(events) +} + +func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Pending -= len(events) +} + +// endpoints is global registry of endpoints used to report metrics to expvar +var endpoints struct { + registered []*Endpoint + mu sync.Mutex +} + +// register places the endpoint into expvar so that stats are tracked. +func register(e *Endpoint) { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + endpoints.registered = append(endpoints.registered, e) +} + +func init() { + // NOTE(stevvooe): Setup registry metrics structure to report to expvar. + // Ideally, we do more metrics through logging but we need some nice + // realtime metrics for queue state for now. 
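+ // (Editor's note: when the expvar handler is mounted on an HTTP mux, these
+ // counters appear as JSON under /debug/vars at registry -> notifications ->
+ // endpoints.)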
+ + registry := expvar.Get("registry") + + if registry == nil { + registry = expvar.NewMap("registry") + } + + var notifications expvar.Map + notifications.Init() + notifications.Set("endpoints", expvar.Func(func() interface{} { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + var names []interface{} + for _, v := range endpoints.registered { + var epjson struct { + Name string `json:"name"` + URL string `json:"url"` + EndpointConfig + + Metrics EndpointMetrics + } + + epjson.Name = v.Name() + epjson.URL = v.URL() + epjson.EndpointConfig = v.EndpointConfig + + v.ReadMetrics(&epjson.Metrics) + + names = append(names, epjson) + } + + return names + })) + + registry.(*expvar.Map).Set("notifications", ¬ifications) +} diff --git a/vendor/github.com/docker/distribution/notifications/sinks.go b/vendor/github.com/docker/distribution/notifications/sinks.go new file mode 100644 index 0000000000000..14b692a3cf7ab --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/sinks.go @@ -0,0 +1,392 @@ +package notifications + +import ( + "container/list" + "fmt" + "sync" + "time" + + "github.com/sirupsen/logrus" +) + +// NOTE(stevvooe): This file contains definitions for several utility sinks. +// Typically, the broadcaster is the only sink that should be required +// externally, but others are suitable for export if the need arises. Albeit, +// the tight integration with endpoint metrics should be removed. + +// Broadcaster sends events to multiple, reliable Sinks. The goal of this +// component is to dispatch events to configured endpoints. Reliability can be +// provided by wrapping incoming sinks. +type Broadcaster struct { + sinks []Sink + events chan []Event + closed chan chan struct{} +} + +// NewBroadcaster ... +// Add appends one or more sinks to the list of sinks. The broadcaster +// behavior will be affected by the properties of the sink. Generally, the +// sink should accept all messages and deal with reliability on its own. Use +// of EventQueue and RetryingSink should be used here. +func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan []Event), + closed: make(chan chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts a block of events to be dispatched to all sinks. This method +// will never fail and should never block (hopefully!). The caller cedes the +// slice memory to the broadcaster and should not modify it after calling +// write. +func (b *Broadcaster) Write(events ...Event) error { + select { + case b.events <- events: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + logrus.Infof("broadcaster: closing") + select { + case <-b.closed: + // already closed + return fmt.Errorf("broadcaster: already closed") + default: + // do a little chan handoff dance to synchronize closing + closed := make(chan struct{}) + b.closed <- closed + close(b.closed) + <-closed + return nil + } +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. 
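+//
+// A minimal lifecycle sketch of the loop below, assuming s1 and s2 are
+// Sink implementations:
+//
+//    b := NewBroadcaster(s1, s2)
+//    _ = b.Write(Event{}) // handed to run, fanned out to s1 and s2
+//    _ = b.Close()        // flushes, closes the sinks, stops run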
+func (b *Broadcaster) run() { + for { + select { + case block := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(block...); err != nil { + logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err) + } + } + case closing := <-b.closed: + + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil { + logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err) + } + } + closing <- struct{}{} + + logrus.Debugf("broadcaster: closed") + return + } + } +} + +// eventQueue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type eventQueue struct { + sink Sink + events *list.List + listeners []eventQueueListener + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// eventQueueListener is called when various events happen on the queue. +type eventQueueListener interface { + ingress(events ...Event) + egress(events ...Event) +} + +// newEventQueue returns a queue to the provided sink. If the updater is non- +// nil, it will be called to update pending metrics on ingress and egress. +func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue { + eq := eventQueue{ + sink: sink, + events: list.New(), + listeners: listeners, + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// beend closed. +func (eq *eventQueue) Write(events ...Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + for _, listener := range eq.listeners { + listener.ingress(events...) + } + eq.events.PushBack(events) + eq.cond.Signal() // signal waiters + + return nil +} + +// Close shuts down the event queue, flushing +func (eq *eventQueue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return fmt.Errorf("eventqueue: already closed") + } + + // set closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + + return eq.sink.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *eventQueue) run() { + for { + block := eq.next() + + if block == nil { + return // nil block means event queue is closed. + } + + if err := eq.sink.Write(block...); err != nil { + logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err) + } + + for _, listener := range eq.listeners { + listener.egress(block...) + } + } +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return a block. When closed, a nil slice will be returned. +func (eq *eventQueue) next() []Event { + eq.mu.Lock() + defer eq.mu.Unlock() + + for eq.events.Len() < 1 { + if eq.closed { + eq.cond.Broadcast() + return nil + } + + eq.cond.Wait() + } + + front := eq.events.Front() + block := front.Value.([]Event) + eq.events.Remove(front) + + return block +} + +// ignoredSink discards events with ignored target media types and actions. +// passes the rest along. 
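+//
+// For example, a sketch assuming s is a downstream Sink:
+//
+//    filtered := newIgnoredSink(s,
+//        []string{"application/octet-stream"}, // target media types to drop
+//        []string{"pull"})                     // actions to drop
+//    _ = filtered.Write(events...) // only events passing both filters reach s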
+type ignoredSink struct { + Sink + ignoreMediaTypes map[string]bool + ignoreActions map[string]bool +} + +func newIgnoredSink(sink Sink, ignored []string, ignoreActions []string) Sink { + if len(ignored) == 0 { + return sink + } + + ignoredMap := make(map[string]bool) + for _, mediaType := range ignored { + ignoredMap[mediaType] = true + } + + ignoredActionsMap := make(map[string]bool) + for _, action := range ignoreActions { + ignoredActionsMap[action] = true + } + + return &ignoredSink{ + Sink: sink, + ignoreMediaTypes: ignoredMap, + ignoreActions: ignoredActionsMap, + } +} + +// Write discards events with ignored target media types and passes the rest +// along. +func (imts *ignoredSink) Write(events ...Event) error { + var kept []Event + for _, e := range events { + if !imts.ignoreMediaTypes[e.Target.MediaType] { + kept = append(kept, e) + } + } + if len(kept) == 0 { + return nil + } + + var results []Event + for _, e := range kept { + if !imts.ignoreActions[e.Action] { + results = append(results, e) + } + } + if len(results) == 0 { + return nil + } + return imts.Sink.Write(results...) +} + +// retryingSink retries the write until success or an ErrSinkClosed is +// returned. Underlying sink must have p > 0 of succeeding or the sink will +// block. Internally, it is a circuit breaker retries to manage reset. +// Concurrent calls to a retrying sink are serialized through the sink, +// meaning that if one is in-flight, another will not proceed. +type retryingSink struct { + mu sync.Mutex + sink Sink + closed bool + + // circuit breaker heuristics + failures struct { + threshold int + recent int + last time.Time + backoff time.Duration // time after which we retry after failure. + } +} + +type retryingSinkListener interface { + active(events ...Event) + retry(events ...Event) +} + +// TODO(stevvooe): We are using circuit break here, which actually doesn't +// make a whole lot of sense for this use case, since we always retry. Move +// this to use bounded exponential backoff. + +// newRetryingSink returns a sink that will retry writes to a sink, backing +// off on failure. Parameters threshold and backoff adjust the behavior of the +// circuit breaker. +func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink { + rs := &retryingSink{ + sink: sink, + } + rs.failures.threshold = threshold + rs.failures.backoff = backoff + + return rs +} + +// Write attempts to flush the events to the downstream sink until it succeeds +// or the sink is closed. +func (rs *retryingSink) Write(events ...Event) error { + rs.mu.Lock() + defer rs.mu.Unlock() + +retry: + + if rs.closed { + return ErrSinkClosed + } + + if !rs.proceed() { + logrus.Warnf("%v encountered too many errors, backing off", rs.sink) + rs.wait(rs.failures.backoff) + goto retry + } + + if err := rs.write(events...); err != nil { + if err == ErrSinkClosed { + // terminal! + return err + } + + logrus.Errorf("retryingsink: error writing events: %v, retrying", err) + goto retry + } + + return nil +} + +// Close closes the sink and the underlying sink. +func (rs *retryingSink) Close() error { + rs.mu.Lock() + defer rs.mu.Unlock() + + if rs.closed { + return fmt.Errorf("retryingsink: already closed") + } + + rs.closed = true + return rs.sink.Close() +} + +// write provides a helper that dispatches failure and success properly. Used +// by write as the single-flight write call. 
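+//
+// A wiring sketch for the retrying sink as a whole, assuming s is an
+// unreliable downstream Sink:
+//
+//    rs := newRetryingSink(s, 3, 10*time.Second)
+//    _ = rs.Write(events...) // blocks, backing off after 3 recent failures
+//    _ = rs.Close()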
+func (rs *retryingSink) write(events ...Event) error { + if err := rs.sink.Write(events...); err != nil { + rs.failure() + return err + } + + rs.reset() + return nil +} + +// wait backoff time against the sink, unlocking so others can proceed. Should +// only be called by methods that currently have the mutex. +func (rs *retryingSink) wait(backoff time.Duration) { + rs.mu.Unlock() + defer rs.mu.Lock() + + // backoff here + time.Sleep(backoff) +} + +// reset marks a successful call. +func (rs *retryingSink) reset() { + rs.failures.recent = 0 + rs.failures.last = time.Time{} +} + +// failure records a failure. +func (rs *retryingSink) failure() { + rs.failures.recent++ + rs.failures.last = time.Now().UTC() +} + +// proceed returns true if the call should proceed based on circuit breaker +// heuristics. +func (rs *retryingSink) proceed() bool { + return rs.failures.recent < rs.failures.threshold || + time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff)) +} diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go new file mode 100644 index 0000000000000..978df7eabbf19 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go new file mode 100644 index 0000000000000..2d71fc5e9ffd9 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/normalize.go @@ -0,0 +1,170 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digestset" + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. 
If the value may be an identifier +// use ParseAnyReference. +func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remoteName string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remoteName = remainder[:tagSep] + } else { + remoteName = remainder + } + if strings.ToLower(remoteName) != remoteName { + return nil, errors.New("invalid reference format: repository name must be lowercase") + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// splitDockerDomain splits a repository name to domain and remotename string. +// If no valid domain is found, the default domain is used. Repository name +// needs to be already validated before. +func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. +// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. +func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. 
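+//
+// For example, a sketch of the three accepted forms:
+//
+//    hex := strings.Repeat("a", 64)
+//    r1, _ := ParseAnyReference("redis")         // familiar name -> docker.io/library/redis
+//    r2, _ := ParseAnyReference("sha256:" + hex) // full digest
+//    r3, _ := ParseAnyReference(hex)             // identifier -> sha256 digest reference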
+func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} + +// ParseAnyReferenceWithSet parses a reference string as a possible short +// identifier to be matched in a digest set, a full digest, or familiar name. +func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { + if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { + dgst, err := ds.Lookup(ref) + if err == nil { + return digestReference(dgst), nil + } + } else { + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go new file mode 100644 index 0000000000000..2f66cca87a3c1 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. 
+ ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. 
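+//
+// For example (Parse does not apply docker.io normalization; see
+// ParseNormalizedNamed for that):
+//
+//    ref, _ := Parse("example.com/foo/bar:1.0")
+//    nt := ref.(NamedTagged)
+//    _ = nt.Name() // "example.com/foo/bar"
+//    _ = nt.Tag()  // "1.0"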
+func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if nameMatch != nil && len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
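+//
+// For example:
+//
+//    named, _ := ParseNormalizedNamed("redis:latest")
+//    _ = TrimNamed(named).String() // "docker.io/library/redis"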
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 0000000000000..78603493203f4 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,143 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. 
This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. 
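+//
+// For example, a sketch of how these combinators compose:
+//
+//    // builds ^[a-z]+(?:-[0-9]+)?$
+//    re := anchored(match(`[a-z]+`), optional(literal(`-`), match(`[0-9]+`)))
+//    _ = re.MatchString("abc-123") // true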
+func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go new file mode 100644 index 0000000000000..6c32109894d5e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry.go @@ -0,0 +1,118 @@ +package distribution + +import ( + "context" + + "github.com/docker/distribution/reference" +) + +// Scope defines the set of items that match a namespace. +type Scope interface { + // Contains returns true if the name belongs to the namespace. + Contains(name string) bool +} + +type fullScope struct{} + +func (f fullScope) Contains(string) bool { + return true +} + +// GlobalScope represents the full namespace scope which contains +// all other scopes. +var GlobalScope = Scope(fullScope{}) + +// Namespace represents a collection of repositories, addressable by name. +// Generally, a namespace is backed by a set of one or more services, +// providing facilities such as registry access, trust, and indexing. +type Namespace interface { + // Scope describes the names that can be used with this Namespace. The + // global namespace will have a scope that matches all names. The scope + // effectively provides an identity for the namespace. + Scope() Scope + + // Repository should return a reference to the named repository. The + // registry may or may not have the repository but should always return a + // reference. + Repository(ctx context.Context, name reference.Named) (Repository, error) + + // Repositories fills 'repos' with a lexicographically sorted catalog of repositories + // up to the size of 'repos' and returns the value 'n' for the number of entries + // which were filled. 'last' contains an offset in the catalog, and 'err' will be + // set to io.EOF if there are no more entries to obtain. 
+ Repositories(ctx context.Context, repos []string, last string) (n int, err error) + + // Blobs returns a blob enumerator to access all blobs + Blobs() BlobEnumerator + + // BlobStatter returns a BlobStatter to control + BlobStatter() BlobStatter +} + +// RepositoryEnumerator describes an operation to enumerate repositories +type RepositoryEnumerator interface { + Enumerate(ctx context.Context, ingester func(string) error) error +} + +// RepositoryRemover removes given repository +type RepositoryRemover interface { + Remove(ctx context.Context, name reference.Named) error +} + +// ManifestServiceOption is a function argument for Manifest Service methods +type ManifestServiceOption interface { + Apply(ManifestService) error +} + +// WithTag allows a tag to be passed into Put +func WithTag(tag string) ManifestServiceOption { + return WithTagOption{tag} +} + +// WithTagOption holds a tag +type WithTagOption struct{ Tag string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithTagOption) Apply(m ManifestService) error { + // no implementation + return nil +} + +// WithManifestMediaTypes lists the media types the client wishes +// the server to provide. +func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption { + return WithManifestMediaTypesOption{mediaTypes} +} + +// WithManifestMediaTypesOption holds a list of accepted media types +type WithManifestMediaTypesOption struct{ MediaTypes []string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithManifestMediaTypesOption) Apply(m ManifestService) error { + // no implementation + return nil +} + +// Repository is a named collection of manifests and layers. +type Repository interface { + // Named returns the name of the repository. + Named() reference.Named + + // Manifests returns a reference to this repository's manifest service. + // with the supplied options applied. + Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) + + // Blobs returns a reference to this repository's blob service. + Blobs(ctx context.Context) BlobStore + + // TODO(stevvooe): The above BlobStore return can probably be relaxed to + // be a BlobService for use with clients. This will allow such + // implementations to avoid implementing ServeBlob. + + // Tags returns a reference to this repositories tag service + Tags(ctx context.Context) TagService +} + +// TODO(stevvooe): Must add close methods to all these. May want to change the +// way instances are created to better reflect internal dependency +// relationships. diff --git a/vendor/github.com/docker/distribution/registry/auth/auth.go b/vendor/github.com/docker/distribution/registry/auth/auth.go new file mode 100644 index 0000000000000..835eff73dcf51 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/auth.go @@ -0,0 +1,201 @@ +// Package auth defines a standard interface for request access controllers. +// +// An access controller has a simple interface with a single `Authorized` +// method which checks that a given request is authorized to perform one or +// more actions on one or more resources. This method should return a non-nil +// error if the request is not authorized. +// +// An implementation registers its access controller by name with a constructor +// which accepts an options map for configuring the access controller. 
+// +// options := map[string]interface{}{"sillySecret": "whysosilly?"} +// accessController, _ := auth.GetAccessController("silly", options) +// +// This `accessController` can then be used in a request handler like so: +// +// func updateOrder(w http.ResponseWriter, r *http.Request) { +// orderNumber := r.FormValue("orderNumber") +// resource := auth.Resource{Type: "customerOrder", Name: orderNumber} +// access := auth.Access{Resource: resource, Action: "update"} +// +// if ctx, err := accessController.Authorized(ctx, access); err != nil { +// if challenge, ok := err.(auth.Challenge) { +// // Let the challenge write the response. +// challenge.SetHeaders(r, w) +// w.WriteHeader(http.StatusUnauthorized) +// return +// } else { +// // Some other error. +// } +// } +// } +// +package auth + +import ( + "context" + "errors" + "fmt" + "net/http" +) + +const ( + // UserKey is used to get the user object from + // a user context + UserKey = "auth.user" + + // UserNameKey is used to get the user name from + // a user context + UserNameKey = "auth.user.name" +) + +var ( + // ErrInvalidCredential is returned when the auth token does not authenticate correctly. + ErrInvalidCredential = errors.New("invalid authorization credential") + + // ErrAuthenticationFailure returned when authentication fails. + ErrAuthenticationFailure = errors.New("authentication failure") +) + +// UserInfo carries information about +// an autenticated/authorized client. +type UserInfo struct { + Name string +} + +// Resource describes a resource by type and name. +type Resource struct { + Type string + Class string + Name string +} + +// Access describes a specific action that is +// requested or allowed for a given resource. +type Access struct { + Resource + Action string +} + +// Challenge is a special error type which is used for HTTP 401 Unauthorized +// responses and is able to write the response with WWW-Authenticate challenge +// header values based on the error. +type Challenge interface { + error + + // SetHeaders prepares the request to conduct a challenge response by + // adding the an HTTP challenge header on the response message. Callers + // are expected to set the appropriate HTTP status code (e.g. 401) + // themselves. + SetHeaders(r *http.Request, w http.ResponseWriter) +} + +// AccessController controls access to registry resources based on a request +// and required access levels for a request. Implementations can support both +// complete denial and http authorization challenges. +type AccessController interface { + // Authorized returns a non-nil error if the context is granted access and + // returns a new authorized context. If one or more Access structs are + // provided, the requested access will be compared with what is available + // to the context. The given context will contain a "http.request" key with + // a `*http.Request` value. If the error is non-nil, access should always + // be denied. The error may be of type Challenge, in which case the caller + // may have the Challenge handle the request or choose what action to take + // based on the Challenge header or response status. The returned context + // object should have a "auth.user" value set to a UserInfo struct. + Authorized(ctx context.Context, access ...Access) (context.Context, error) +} + +// CredentialAuthenticator is an object which is able to authenticate credentials +type CredentialAuthenticator interface { + AuthenticateUser(username, password string) error +} + +// WithUser returns a context with the authorized user info. 
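+//
+// For example:
+//
+//    ctx = WithUser(ctx, UserInfo{Name: "alice"})
+//    _ = ctx.Value(UserNameKey) // "alice"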
+func WithUser(ctx context.Context, user UserInfo) context.Context { + return userInfoContext{ + Context: ctx, + user: user, + } +} + +type userInfoContext struct { + context.Context + user UserInfo +} + +func (uic userInfoContext) Value(key interface{}) interface{} { + switch key { + case UserKey: + return uic.user + case UserNameKey: + return uic.user.Name + } + + return uic.Context.Value(key) +} + +// WithResources returns a context with the authorized resources. +func WithResources(ctx context.Context, resources []Resource) context.Context { + return resourceContext{ + Context: ctx, + resources: resources, + } +} + +type resourceContext struct { + context.Context + resources []Resource +} + +type resourceKey struct{} + +func (rc resourceContext) Value(key interface{}) interface{} { + if key == (resourceKey{}) { + return rc.resources + } + + return rc.Context.Value(key) +} + +// AuthorizedResources returns the list of resources which have +// been authorized for this request. +func AuthorizedResources(ctx context.Context) []Resource { + if resources, ok := ctx.Value(resourceKey{}).([]Resource); ok { + return resources + } + + return nil +} + +// InitFunc is the type of an AccessController factory function and is used +// to register the constructor for different AccesController backends. +type InitFunc func(options map[string]interface{}) (AccessController, error) + +var accessControllers map[string]InitFunc + +func init() { + accessControllers = make(map[string]InitFunc) +} + +// Register is used to register an InitFunc for +// an AccessController backend with the given name. +func Register(name string, initFunc InitFunc) error { + if _, exists := accessControllers[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + accessControllers[name] = initFunc + + return nil +} + +// GetAccessController constructs an AccessController +// with the given options using the named backend. +func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { + if initFunc, exists := accessControllers[name]; exists { + return initFunc(options) + } + + return nil, fmt.Errorf("no access controller registered with name: %s", name) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go new file mode 100644 index 0000000000000..fa924f0beb67d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go @@ -0,0 +1,293 @@ +package token + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/libtrust" +) + +// accessSet maps a typed, named resource to +// a set of actions requested or authorized. +type accessSet map[auth.Resource]actionSet + +// newAccessSet constructs an accessSet from +// a variable number of auth.Access items. 
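+//
+// For example:
+//
+//    set := newAccessSet(
+//        auth.Access{Resource: auth.Resource{Type: "repository", Name: "foo/bar"}, Action: "pull"},
+//        auth.Access{Resource: auth.Resource{Type: "repository", Name: "foo/bar"}, Action: "push"},
+//    )
+//    _ = set.scopeParam() // "repository:foo/bar:pull,push" (action order not guaranteed)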
+func newAccessSet(accessItems ...auth.Access) accessSet { + accessSet := make(accessSet, len(accessItems)) + + for _, access := range accessItems { + resource := auth.Resource{ + Type: access.Type, + Name: access.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + set.add(access.Action) + } + + return accessSet +} + +// contains returns whether or not the given access is in this accessSet. +func (s accessSet) contains(access auth.Access) bool { + actionSet, ok := s[access.Resource] + if ok { + return actionSet.contains(access.Action) + } + + return false +} + +// scopeParam returns a collection of scopes which can +// be used for a WWW-Authenticate challenge parameter. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (s accessSet) scopeParam() string { + scopes := make([]string, 0, len(s)) + + for resource, actionSet := range s { + actions := strings.Join(actionSet.keys(), ",") + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) + } + + return strings.Join(scopes, " ") +} + +// Errors used and exported by this package. +var ( + ErrInsufficientScope = errors.New("insufficient scope") + ErrTokenRequired = errors.New("authorization token required") +) + +// authChallenge implements the auth.Challenge interface. +type authChallenge struct { + err error + realm string + autoRedirect bool + service string + accessSet accessSet +} + +var _ auth.Challenge = authChallenge{} + +// Error returns the internal error string for this authChallenge. +func (ac authChallenge) Error() string { + return ac.err.Error() +} + +// Status returns the HTTP Response Status Code for this authChallenge. +func (ac authChallenge) Status() int { + return http.StatusUnauthorized +} + +// challengeParams constructs the value to be used in +// the WWW-Authenticate response challenge header. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (ac authChallenge) challengeParams(r *http.Request) string { + var realm string + if ac.autoRedirect { + realm = fmt.Sprintf("https://%s/auth/token", r.Host) + } else { + realm = ac.realm + } + str := fmt.Sprintf("Bearer realm=%q,service=%q", realm, ac.service) + + if scope := ac.accessSet.scopeParam(); scope != "" { + str = fmt.Sprintf("%s,scope=%q", str, scope) + } + + if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { + str = fmt.Sprintf("%s,error=%q", str, "invalid_token") + } else if ac.err == ErrInsufficientScope { + str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") + } + + return str +} + +// SetChallenge sets the WWW-Authenticate value for the response. +func (ac authChallenge) SetHeaders(r *http.Request, w http.ResponseWriter) { + w.Header().Add("WWW-Authenticate", ac.challengeParams(r)) +} + +// accessController implements the auth.AccessController interface. +type accessController struct { + realm string + autoRedirect bool + issuer string + service string + rootCerts *x509.CertPool + trustedKeys map[string]libtrust.PublicKey +} + +// tokenAccessOptions is a convenience type for handling +// options to the contstructor of an accessController. +type tokenAccessOptions struct { + realm string + autoRedirect bool + issuer string + service string + rootCertBundle string +} + +// checkOptions gathers the necessary options +// for an accessController from the given map. 
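+//
+// For example, a sketch with placeholder values:
+//
+//    opts, err := checkOptions(map[string]interface{}{
+//        "realm":          "https://auth.example.com/token",
+//        "issuer":         "auth.example.com",
+//        "service":        "registry.example.com",
+//        "rootcertbundle": "/etc/registry/root.crt",
+//        "autoredirect":   false, // optional
+//    })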
+func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { + var opts tokenAccessOptions + + keys := []string{"realm", "issuer", "service", "rootcertbundle"} + vals := make([]string, 0, len(keys)) + for _, key := range keys { + val, ok := options[key].(string) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option string: %q", key) + } + vals = append(vals, val) + } + + opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] + + autoRedirectVal, ok := options["autoredirect"] + if ok { + autoRedirect, ok := autoRedirectVal.(bool) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option bool: autoredirect") + } + opts.autoRedirect = autoRedirect + } + + return opts, nil +} + +// newAccessController creates an accessController using the given options. +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + config, err := checkOptions(options) + if err != nil { + return nil, err + } + + fp, err := os.Open(config.rootCertBundle) + if err != nil { + return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + defer fp.Close() + + rawCertBundle, err := ioutil.ReadAll(fp) + if err != nil { + return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + + var rootCerts []*x509.Certificate + pemBlock, rawCertBundle := pem.Decode(rawCertBundle) + for pemBlock != nil { + if pemBlock.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) + } + + rootCerts = append(rootCerts, cert) + } + + pemBlock, rawCertBundle = pem.Decode(rawCertBundle) + } + + if len(rootCerts) == 0 { + return nil, errors.New("token auth requires at least one token signing root certificate") + } + + rootPool := x509.NewCertPool() + trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) + if err != nil { + return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) + } + trustedKeys[pubKey.KeyID()] = pubKey + } + + return &accessController{ + realm: config.realm, + autoRedirect: config.autoRedirect, + issuer: config.issuer, + service: config.service, + rootCerts: rootPool, + trustedKeys: trustedKeys, + }, nil +} + +// Authorized handles checking whether the given request is authorized +// for actions on resources described by the given access items. 
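+//
+// A sketch of the expected flow, assuming options as accepted by
+// checkOptions above and a ctx carrying the *http.Request:
+//
+//    ac, _ := auth.GetAccessController("token", options)
+//    grantedCtx, err := ac.Authorized(ctx, auth.Access{
+//        Resource: auth.Resource{Type: "repository", Name: "foo/bar"},
+//        Action:   "pull",
+//    })
+//    // a non-nil err implementing auth.Challenge should be answered with 401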
+func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { + challenge := &authChallenge{ + realm: ac.realm, + autoRedirect: ac.autoRedirect, + service: ac.service, + accessSet: newAccessSet(accessItems...), + } + + req, err := dcontext.GetRequest(ctx) + if err != nil { + return nil, err + } + + parts := strings.Split(req.Header.Get("Authorization"), " ") + + if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { + challenge.err = ErrTokenRequired + return nil, challenge + } + + rawToken := parts[1] + + token, err := NewToken(rawToken) + if err != nil { + challenge.err = err + return nil, challenge + } + + verifyOpts := VerifyOptions{ + TrustedIssuers: []string{ac.issuer}, + AcceptedAudiences: []string{ac.service}, + Roots: ac.rootCerts, + TrustedKeys: ac.trustedKeys, + } + + if err = token.Verify(verifyOpts); err != nil { + challenge.err = err + return nil, challenge + } + + accessSet := token.accessSet() + for _, access := range accessItems { + if !accessSet.contains(access) { + challenge.err = ErrInsufficientScope + return nil, challenge + } + } + + ctx = auth.WithResources(ctx, token.resources()) + + return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil +} + +// init handles registering the token auth backend. +func init() { + auth.Register("token", auth.InitFunc(newAccessController)) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/stringset.go b/vendor/github.com/docker/distribution/registry/auth/token/stringset.go new file mode 100644 index 0000000000000..1d04f104c52e4 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/stringset.go @@ -0,0 +1,35 @@ +package token + +// StringSet is a useful type for looking up strings. +type stringSet map[string]struct{} + +// NewStringSet creates a new StringSet with the given strings. +func newStringSet(keys ...string) stringSet { + ss := make(stringSet, len(keys)) + ss.add(keys...) + return ss +} + +// Add inserts the given keys into this StringSet. +func (ss stringSet) add(keys ...string) { + for _, key := range keys { + ss[key] = struct{}{} + } +} + +// Contains returns whether the given key is in this StringSet. +func (ss stringSet) contains(key string) bool { + _, ok := ss[key] + return ok +} + +// Keys returns a slice of all keys in this StringSet. +func (ss stringSet) keys() []string { + keys := make([]string, 0, len(ss)) + + for key := range ss { + keys = append(keys, key) + } + + return keys +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/token.go b/vendor/github.com/docker/distribution/registry/auth/token/token.go new file mode 100644 index 0000000000000..7f87d496f6bd0 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/token.go @@ -0,0 +1,378 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/docker/libtrust" + log "github.com/sirupsen/logrus" + + "github.com/docker/distribution/registry/auth" +) + +const ( + // TokenSeparator is the value which separates the header, claims, and + // signature in the compact serialization of a JSON Web Token. + TokenSeparator = "." + // Leeway is the Duration that will be added to NBF and EXP claim + // checks to account for clock skew as per https://tools.ietf.org/html/rfc7519#section-4.1.5 + Leeway = 60 * time.Second +) + +// Errors used by token parsing and verification. 
+var ( + ErrMalformedToken = errors.New("malformed token") + ErrInvalidToken = errors.New("invalid token") +) + +// ResourceActions stores allowed actions on a named and typed resource. +type ResourceActions struct { + Type string `json:"type"` + Class string `json:"class,omitempty"` + Name string `json:"name"` + Actions []string `json:"actions"` +} + +// ClaimSet describes the main section of a JSON Web Token. +type ClaimSet struct { + // Public claims + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience string `json:"aud"` + Expiration int64 `json:"exp"` + NotBefore int64 `json:"nbf"` + IssuedAt int64 `json:"iat"` + JWTID string `json:"jti"` + + // Private claims + Access []*ResourceActions `json:"access"` +} + +// Header describes the header section of a JSON Web Token. +type Header struct { + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + X5c []string `json:"x5c,omitempty"` + RawJWK *json.RawMessage `json:"jwk,omitempty"` +} + +// Token describes a JSON Web Token. +type Token struct { + Raw string + Header *Header + Claims *ClaimSet + Signature []byte +} + +// VerifyOptions is used to specify +// options when verifying a JSON Web Token. +type VerifyOptions struct { + TrustedIssuers []string + AcceptedAudiences []string + Roots *x509.CertPool + TrustedKeys map[string]libtrust.PublicKey +} + +// NewToken parses the given raw token string +// and constructs an unverified JSON Web Token. +func NewToken(rawToken string) (*Token, error) { + parts := strings.Split(rawToken, TokenSeparator) + if len(parts) != 3 { + return nil, ErrMalformedToken + } + + var ( + rawHeader, rawClaims = parts[0], parts[1] + headerJSON, claimsJSON []byte + err error + ) + + defer func() { + if err != nil { + log.Infof("error while unmarshalling raw token: %s", err) + } + }() + + if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { + err = fmt.Errorf("unable to decode header: %s", err) + return nil, ErrMalformedToken + } + + if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { + err = fmt.Errorf("unable to decode claims: %s", err) + return nil, ErrMalformedToken + } + + token := new(Token) + token.Header = new(Header) + token.Claims = new(ClaimSet) + + token.Raw = strings.Join(parts[:2], TokenSeparator) + if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { + err = fmt.Errorf("unable to decode signature: %s", err) + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(headerJSON, token.Header); err != nil { + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { + return nil, ErrMalformedToken + } + + return token, nil +} + +// Verify attempts to verify this token using the given options. +// Returns a nil error if the token is valid. +func (t *Token) Verify(verifyOpts VerifyOptions) error { + // Verify that the Issuer claim is a trusted authority. + if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { + log.Infof("token from untrusted issuer: %q", t.Claims.Issuer) + return ErrInvalidToken + } + + // Verify that the Audience claim is allowed. + if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { + log.Infof("token intended for another audience: %q", t.Claims.Audience) + return ErrInvalidToken + } + + // Verify that the token is currently usable and not expired. 
+	currentTime := time.Now()
+
+	ExpWithLeeway := time.Unix(t.Claims.Expiration, 0).Add(Leeway)
+	if currentTime.After(ExpWithLeeway) {
+		log.Infof("token not to be used after %s - currently %s", ExpWithLeeway, currentTime)
+		return ErrInvalidToken
+	}
+
+	NotBeforeWithLeeway := time.Unix(t.Claims.NotBefore, 0).Add(-Leeway)
+	if currentTime.Before(NotBeforeWithLeeway) {
+		log.Infof("token not to be used before %s - currently %s", NotBeforeWithLeeway, currentTime)
+		return ErrInvalidToken
+	}
+
+	// Verify the token signature.
+	if len(t.Signature) == 0 {
+		log.Info("token has no signature")
+		return ErrInvalidToken
+	}
+
+	// Verify that the signing key is trusted.
+	signingKey, err := t.VerifySigningKey(verifyOpts)
+	if err != nil {
+		log.Info(err)
+		return ErrInvalidToken
+	}
+
+	// Finally, verify the signature of the token using the key which signed it.
+	if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil {
+		log.Infof("unable to verify token signature: %s", err)
+		return ErrInvalidToken
+	}
+
+	return nil
+}
+
+// VerifySigningKey attempts to get the key which was used to sign this token.
+// The token header should contain one of these three fields:
+//	`x5c` - The x509 certificate chain for the signing key. Needs to be
+//	        verified.
+//	`jwk` - The JSON Web Key representation of the signing key.
+//	        May contain its own `x5c` field which needs to be verified.
+//	`kid` - The unique identifier for the key. This library interprets it
+//	        as a libtrust fingerprint. The key itself can be looked up in
+//	        the trustedKeys field of the given verify options.
+// Each of these methods is tried in that order of preference until the
+// signing key is found or an error is returned.
+func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {
+	// First attempt to get an x509 certificate chain from the header.
+	var (
+		x5c    = t.Header.X5c
+		rawJWK = t.Header.RawJWK
+		keyID  = t.Header.KeyID
+	)
+
+	switch {
+	case len(x5c) > 0:
+		signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots)
+	case rawJWK != nil:
+		signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts)
+	case len(keyID) > 0:
+		signingKey = verifyOpts.TrustedKeys[keyID]
+		if signingKey == nil {
+			err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID)
+		}
+	default:
+		err = errors.New("unable to get token signing key")
+	}
+
+	return
+}
+
+func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) {
+	if len(x5c) == 0 {
+		return nil, errors.New("empty x509 certificate chain")
+	}
+
+	// Ensure the first element is encoded correctly.
+	leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0])
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode leaf certificate: %s", err)
+	}
+
+	// And that it is a valid x509 certificate.
+	leafCert, err := x509.ParseCertificate(leafCertDer)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse leaf certificate: %s", err)
+	}
+
+	// The remaining elements of the chain are intermediate certificates.
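+	// Note: per the JWS "x5c" header definition, each entry in the chain is
+	// a standard (not URL-safe) base64 encoding of a DER certificate.
+	// Entries after the leaf are collected below as intermediates for
+	// verification against the trusted roots.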
+ intermediates := x509.NewCertPool() + for i := 1; i < len(x5c); i++ { + intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) + } + + intermediateCert, err := x509.ParseCertificate(intermediateCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) + } + + intermediates.AddCert(intermediateCert) + } + + verifyOpts := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: roots, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + } + + // TODO: this call returns certificate chains which we ignore for now, but + // we should check them for revocations if we have the ability later. + if _, err = leafCert.Verify(verifyOpts); err != nil { + return nil, fmt.Errorf("unable to verify certificate chain: %s", err) + } + + // Get the public key from the leaf certificate. + leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) + if !ok { + return nil, errors.New("unable to get leaf cert public key value") + } + + leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) + if err != nil { + return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) + } + + return +} + +func parseAndVerifyRawJWK(rawJWK *json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { + pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(*rawJWK)) + if err != nil { + return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) + } + + // Check to see if the key includes a certificate chain. + x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) + if !ok { + // The JWK should be one of the trusted root keys. + if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { + return nil, errors.New("untrusted JWK with no certificate chain") + } + + // The JWK is one of the trusted keys. + return + } + + // Ensure each item in the chain is of the correct type. + x5c := make([]string, len(x5cVal)) + for i, val := range x5cVal { + certString, ok := val.(string) + if !ok || len(certString) == 0 { + return nil, errors.New("malformed certificate chain") + } + x5c[i] = certString + } + + // Ensure that the x509 certificate chain can + // be verified up to one of our trusted roots. + leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) + if err != nil { + return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) + } + + // Verify that the public key in the leaf cert *is* the signing key. + if pubKey.KeyID() != leafKey.KeyID() { + return nil, errors.New("leaf certificate public key ID does not match JWK key ID") + } + + return +} + +// accessSet returns a set of actions available for the resource +// actions listed in the `access` section of this token. 
+func (t *Token) accessSet() accessSet { + if t.Claims == nil { + return nil + } + + accessSet := make(accessSet, len(t.Claims.Access)) + + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Name: resourceActions.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + for _, action := range resourceActions.Actions { + set.add(action) + } + } + + return accessSet +} + +func (t *Token) resources() []auth.Resource { + if t.Claims == nil { + return nil + } + + resourceSet := map[auth.Resource]struct{}{} + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Class: resourceActions.Class, + Name: resourceActions.Name, + } + resourceSet[resource] = struct{}{} + } + + resources := make([]auth.Resource, 0, len(resourceSet)) + for resource := range resourceSet { + resources = append(resources, resource) + } + + return resources +} + +func (t *Token) compactRaw() string { + return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/util.go b/vendor/github.com/docker/distribution/registry/auth/token/util.go new file mode 100644 index 0000000000000..d7f95be42170e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/util.go @@ -0,0 +1,58 @@ +package token + +import ( + "encoding/base64" + "errors" + "strings" +) + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +// actionSet is a special type of stringSet. +type actionSet struct { + stringSet +} + +func newActionSet(actions ...string) actionSet { + return actionSet{newStringSet(actions...)} +} + +// Contains calls StringSet.Contains() for +// either "*" or the given action string. +func (s actionSet) contains(action string) bool { + return s.stringSet.contains("*") || s.stringSet.contains(action) +} + +// contains returns true if q is found in ss. +func contains(ss []string, q string) bool { + for _, s := range ss { + if s == q { + return true + } + } + + return false +} diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go new file mode 100644 index 0000000000000..f22df2b850e16 --- /dev/null +++ b/vendor/github.com/docker/distribution/tags.go @@ -0,0 +1,27 @@ +package distribution + +import ( + "context" +) + +// TagService provides access to information about tagged objects. +type TagService interface { + // Get retrieves the descriptor identified by the tag. 
Some
+	// implementations may differentiate between "trusted" tags and
+	// "untrusted" tags. If a tag is "untrusted", the mapping will be returned
+	// as an ErrTagUntrusted error, with the target descriptor.
+	Get(ctx context.Context, tag string) (Descriptor, error)
+
+	// Tag associates the tag with the provided descriptor, updating the
+	// current association, if needed.
+	Tag(ctx context.Context, tag string, desc Descriptor) error
+
+	// Untag removes the given tag association.
+	Untag(ctx context.Context, tag string) error
+
+	// All returns the set of tags managed by this tag service.
+	All(ctx context.Context) ([]string, error)
+
+	// Lookup returns the set of tags referencing the given digest.
+	Lookup(ctx context.Context, digest Descriptor) ([]string, error)
+}
diff --git a/vendor/github.com/docker/distribution/uuid/uuid.go b/vendor/github.com/docker/distribution/uuid/uuid.go
new file mode 100644
index 0000000000000..d433ccaf512d7
--- /dev/null
+++ b/vendor/github.com/docker/distribution/uuid/uuid.go
@@ -0,0 +1,126 @@
+// Package uuid provides simple UUID generation. Only version 4 style UUIDs
+// can be generated.
+//
+// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
+package uuid
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	"os"
+	"syscall"
+	"time"
+)
+
+const (
+	// Bits is the number of bits in a UUID
+	Bits = 128
+
+	// Size is the number of bytes in a UUID
+	Size = Bits / 8
+
+	format = "%08x-%04x-%04x-%04x-%012x"
+)
+
+var (
+	// ErrUUIDInvalid indicates a parsed string is not a valid uuid.
+	ErrUUIDInvalid = fmt.Errorf("invalid uuid")
+
+	// Loggerf can be used to override the default logging destination. Such
+	// log messages in this library should be logged at warning or higher.
+	Loggerf = func(format string, args ...interface{}) {}
+)
+
+// UUID represents a UUID value. UUIDs can be compared and set to other values
+// and accessed by byte.
+type UUID [Size]byte
+
+// Generate creates a new, version 4 uuid.
+func Generate() (u UUID) {
+	const (
+		// ensures we back off for less than 450ms total. Use the following to
+		// select a new value, in units of 10ms:
+		// n*(n+1)/2 = d -> n^2 + n - 2d = 0 -> n = (sqrt(8d + 1) - 1)/2
+		maxretries = 9
+		backoff    = time.Millisecond * 10
+	)
+
+	var (
+		totalBackoff time.Duration
+		count        int
+		retries      int
+	)
+
+	for {
+		// This should never block but the read may fail. Because of this,
+		// we just try to read the random number generator until we get
+		// something. This is a very rare condition but may happen.
+		b := time.Duration(retries) * backoff
+		time.Sleep(b)
+		totalBackoff += b
+
+		n, err := io.ReadFull(rand.Reader, u[count:])
+		if err != nil {
+			if retryOnError(err) && retries < maxretries {
+				count += n
+				retries++
+				Loggerf("error generating version 4 uuid, retrying: %v", err)
+				continue
+			}
+
+			// Any other errors represent a system problem. What did someone
+			// do to /dev/urandom?
+			panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
+		}
+
+		break
+	}
+
+	u[6] = (u[6] & 0x0f) | 0x40 // set version byte
+	u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
+
+	return u
+}
+
+// Parse attempts to extract a uuid from the string or returns an error.
+func Parse(s string) (u UUID, err error) {
+	if len(s) != 36 {
+		return UUID{}, ErrUUIDInvalid
+	}
+
+	// create stack addresses for each section of the uuid.
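+	// For example, "6ba7b810-9dad-11d1-80b4-00c04fd430c8" scans into five
+	// hex groups of 4, 2, 2, 2, and 6 bytes, copied into u below.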
+ p := make([][]byte, 5) + + if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { + return u, err + } + + copy(u[0:4], p[0]) + copy(u[4:6], p[1]) + copy(u[6:8], p[2]) + copy(u[8:10], p[3]) + copy(u[10:16], p[4]) + + return +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf new file mode 100644 index 0000000000000..a249caf269d8d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor.conf @@ -0,0 +1,51 @@ +github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b +github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 +github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 +github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490 +github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 +github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 +github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 +github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 +github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab +github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 +github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 +github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b +github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f +github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 +github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c +github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 +github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef +github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 +github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 +github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c +github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 +github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd +github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd +github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 +github.com/spf13/pflag 
5644820622454e71517561946e3d94b9f9db6842 +github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 +github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e +github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 +github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 +golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b +golang.org/x/net 4876518f9e71663000c348837735820161a42df7 +golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 +google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 +google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 +google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 +gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 +gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b +gopkg.in/yaml.v2 v2.2.1 +rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb +github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 diff --git a/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md new file mode 100644 index 0000000000000..05be0f8ab3d65 --- /dev/null +++ b/vendor/github.com/docker/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/vendor/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/libtrust/LICENSE new file mode 100644 index 0000000000000..27448585ad49c --- /dev/null +++ b/vendor/github.com/docker/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2014 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS
new file mode 100644
index 0000000000000..9768175feb4a7
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/MAINTAINERS
@@ -0,0 +1,3 @@
+Solomon Hykes
+Josh Hawn (github: jlhawn)
+Derek McGowan (github: dmcgowan)
diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md
new file mode 100644
index 0000000000000..dcffb31ae4a7d
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/README.md
@@ -0,0 +1,22 @@
+# libtrust
+
+> **WARNING** this library is no longer actively developed, and will be integrated
+> in the [docker/distribution](https://www.github.com/docker/distribution)
+> repository in the future.
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control is managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
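+
+## Example
+
+A minimal sketch of the signing flow (hypothetical payload; error handling
+elided), using only functions exported by this package:
+
+    package main
+
+    import (
+        "crypto"
+        "fmt"
+        "strings"
+
+        "github.com/docker/libtrust"
+    )
+
+    func main() {
+        // Generate an EC P-256 key and sign a payload with it.
+        key, _ := libtrust.GenerateECP256PrivateKey()
+        sig, alg, _ := key.Sign(strings.NewReader("payload"), crypto.SHA256)
+
+        // The matching public key verifies the signature.
+        err := key.PublicKey().Verify(strings.NewReader("payload"), alg, sig)
+        fmt.Println(alg, err) // e.g. "ES256 <nil>"
+    }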
+
diff --git a/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go
new file mode 100644
index 0000000000000..3dcca33cb180e
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/certificates.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"net"
+	"time"
+)
+
+type certTemplateInfo struct {
+	commonName  string
+	domains     []string
+	ipAddresses []net.IP
+	isCA        bool
+	clientAuth  bool
+	serverAuth  bool
+}
+
+func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
+	// Generate a certificate template which is valid from the past week to
+	// 10 years from now. The usage of the certificate depends on the
+	// specified fields in the given certTemplateInfo object.
+	var (
+		keyUsage    x509.KeyUsage
+		extKeyUsage []x509.ExtKeyUsage
+	)
+
+	if info.isCA {
+		keyUsage = x509.KeyUsageCertSign
+	}
+
+	if info.clientAuth {
+		extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
+	}
+
+	if info.serverAuth {
+		extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
+	}
+
+	return &x509.Certificate{
+		SerialNumber: big.NewInt(0),
+		Subject: pkix.Name{
+			CommonName: info.commonName,
+		},
+		NotBefore:             time.Now().Add(-time.Hour * 24 * 7),
+		NotAfter:              time.Now().Add(time.Hour * 24 * 365 * 10),
+		DNSNames:              info.domains,
+		IPAddresses:           info.ipAddresses,
+		IsCA:                  info.isCA,
+		KeyUsage:              keyUsage,
+		ExtKeyUsage:           extKeyUsage,
+		BasicConstraintsValid: info.isCA,
+	}
+}
+
+func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
+	pubCertTemplate := generateCertTemplate(subInfo)
+	privCertTemplate := generateCertTemplate(issInfo)
+
+	certDER, err := x509.CreateCertificate(
+		rand.Reader, pubCertTemplate, privCertTemplate,
+		pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create certificate: %s", err)
+	}
+
+	cert, err = x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse certificate: %s", err)
+	}
+
+	return
+}
+
+// GenerateSelfSignedServerCert creates a self-signed certificate for the
+// given key which is to be used for TLS servers with the given domains and
+// IP addresses.
+func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
+	info := &certTemplateInfo{
+		commonName:  key.KeyID(),
+		domains:     domains,
+		ipAddresses: ipAddresses,
+		serverAuth:  true,
+	}
+
+	return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateSelfSignedClientCert creates a self-signed certificate for the
+// given key which is to be used for TLS clients.
+func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
+	info := &certTemplateInfo{
+		commonName: key.KeyID(),
+		clientAuth: true,
+	}
+
+	return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateCACert creates a certificate which can be used as a trusted
+// certificate authority.
+func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
+	subjectInfo := &certTemplateInfo{
+		commonName: trustedKey.KeyID(),
+		isCA:       true,
+	}
+	issuerInfo := &certTemplateInfo{
+		commonName: signer.KeyID(),
+	}
+
+	return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
+}
+
+// GenerateCACertPool creates a certificate authority pool to be used for a
+// TLS configuration.
Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go new file mode 100644 index 0000000000000..ec5d2159c1106 --- /dev/null +++ b/vendor/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go new file mode 100644 index 0000000000000..00bbe4b3caf83 --- /dev/null +++ b/vendor/github.com/docker/libtrust/ec_key.go @@ -0,0 +1,428 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. 
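+// The curveName and signatureAlgorithm fields pin the single JWS algorithm
+// (ES256, ES384, or ES512) that is valid for the embedded curve.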
+type ecPublicKey struct {
+	*ecdsa.PublicKey
+	curveName          string
+	signatureAlgorithm *signatureAlgorithm
+	extended           map[string]interface{}
+}
+
+func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
+	curve := cryptoPublicKey.Curve
+
+	switch {
+	case curve == elliptic.P256():
+		return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
+	case curve == elliptic.P384():
+		return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
+	case curve == elliptic.P521():
+		return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
+	default:
+		return nil, errors.New("unsupported elliptic curve")
+	}
+}
+
+// KeyType returns the key type for elliptic curve keys, i.e., "EC".
+func (k *ecPublicKey) KeyType() string {
+	return "EC"
+}
+
+// CurveName returns the elliptic curve identifier.
+// Possible values are "P-256", "P-384", and "P-521".
+func (k *ecPublicKey) CurveName() string {
+	return k.curveName
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *ecPublicKey) KeyID() string {
+	return keyIDFromCryptoKey(k)
+}
+
+func (k *ecPublicKey) String() string {
+	return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this
+// PublicKey. The alg parameter should identify the digital signature
+// algorithm which was used to produce the signature and should be supported
+// by this public key. Returns a nil error if the signature is valid.
+func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+	// For EC keys there is only one supported signature algorithm depending
+	// on the curve parameters.
+	if k.signatureAlgorithm.HeaderParam() != alg {
+		return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
+	}
+
+	// signature is the concatenation of (r, s), base64Url encoded.
+	sigLength := len(signature)
+	expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
+	if sigLength != expectedOctetLength {
+		return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
+	}
+
+	rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
+	r := new(big.Int).SetBytes(rBytes)
+	s := new(big.Int).SetBytes(sBytes)
+
+	hasher := k.signatureAlgorithm.HashID().New()
+	_, err := io.Copy(hasher, data)
+	if err != nil {
+		return fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	if !ecdsa.Verify(k.PublicKey, hash, r, s) {
+		return errors.New("invalid signature")
+	}
+
+	return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. For this
+// implementation the type is *ecdsa.PublicKey.
+func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
+	return k.PublicKey
+}
+
+func (k *ecPublicKey) toMap() map[string]interface{} {
+	jwk := make(map[string]interface{})
+	for k, v := range k.extended {
+		jwk[k] = v
+	}
+	jwk["kty"] = k.KeyType()
+	jwk["kid"] = k.KeyID()
+	jwk["crv"] = k.CurveName()
+
+	xBytes := k.X.Bytes()
+	yBytes := k.Y.Bytes()
+	octetLength := (k.Params().BitSize + 7) >> 3
+	// MUST include leading zeros in the output so that x, y are each
+	// *octetLength* bytes long.
+	xBuf := make([]byte, octetLength-len(xBytes), octetLength)
+	yBuf := make([]byte, octetLength-len(yBytes), octetLength)
+	xBuf = append(xBuf, xBytes...)
+ yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. 
+func (k *ecPrivateKey) PublicKey() PublicKey {
+	return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+	return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature;
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	// The given hashID is only a suggestion: since EC keys support only one
+	// signature/hash algorithm given the curve name, we disregard it for
+	// the elliptic curve JWK signature implementation.
+	hasher := k.signatureAlgorithm.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+	rBytes, sBytes := r.Bytes(), s.Bytes()
+	octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+	// MUST include leading zeros in the output
+	rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+	sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+	rBuf = append(rBuf, rBytes...)
+	sBuf = append(sBuf, sBytes...)
+
+	signature = append(rBuf, sBuf...)
+	alg = k.signatureAlgorithm.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. For this
+// implementation the type is *ecdsa.PrivateKey.
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+	jwk := k.ecPublicKey.toMap()
+
+	dBytes := k.D.Bytes()
+	// The length of this octet string MUST be ceiling(log-base-2(n)/8)
+	// octets (where n is the order of the curve). This is because the private
+	// key d must be in the interval [1, n-1] so the bitlength of d should be
+	// no larger than the bitlength of n-1. The easiest way to find the octet
+	// length is to take bitlength(n-1), add 7 to force a carry, and shift this
+	// bit sequence right by 3, which is essentially dividing by 8 and adding
+	// 1 if there is any remainder. Thus, the private key value d should be
+	// output to (bitlength(n-1)+7)>>3 octets.
+	n := k.ecPublicKey.Params().N
+	octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+	// Create a buffer with the necessary zero-padding.
+	dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+	dBuf = append(dBuf, dBytes...)
+
+	jwk["d"] = joseBase64UrlEncode(dBuf)
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded SEC 1 format.
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
+	}
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+	dB64Url, err := stringFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+	}
+
+	// JWK key type (kty) has already been determined to be "EC".
+	// Need to extract the public key information, then extract the private
+	// key value 'd'.
+	publicKey, err := ecPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+	}
+
+	key := &ecPrivateKey{
+		ecPublicKey: *publicKey,
+		PrivateKey: &ecdsa.PrivateKey{
+			PublicKey: *publicKey.PublicKey,
+			D:         d,
+		},
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+	k = new(ecPrivateKey)
+	k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+	k.extended = make(map[string]interface{})
+
+	return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P256())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+	}
+
+	k.curveName = "P-256"
+	k.signatureAlgorithm = es256
+
+	return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P384())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+	}
+
+	k.curveName = "P-384"
+	k.signatureAlgorithm = es384
+
+	return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P521())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+	}
+
+	k.curveName = "P-521"
+	k.signatureAlgorithm = es512
+
+	return k, nil
+}
diff --git a/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go
new file mode 100644
index 0000000000000..5b2b4fca6fb07
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/filter.go
@@ -0,0 +1,50 @@
+package libtrust
+
+import (
+	"path/filepath"
+)
+
+// FilterByHosts filters the list of PublicKeys to only those which contain a
+// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
+// then keys which do not specify any hosts are also returned.
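+// For example, a key whose "hosts" field is []string{"*.example.com"}
+// matches host "registry.example.com" via filepath.Match.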
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+	filtered := make([]PublicKey, 0, len(keys))
+
+	for _, pubKey := range keys {
+		var hosts []string
+		switch v := pubKey.GetExtendedField("hosts").(type) {
+		case []string:
+			hosts = v
+		case []interface{}:
+			for _, value := range v {
+				h, ok := value.(string)
+				if !ok {
+					continue
+				}
+				hosts = append(hosts, h)
+			}
+		}
+
+		if len(hosts) == 0 {
+			if includeEmpty {
+				filtered = append(filtered, pubKey)
+			}
+			continue
+		}
+
+		// Check if any hosts match pattern
+		for _, hostPattern := range hosts {
+			match, err := filepath.Match(hostPattern, host)
+			if err != nil {
+				return nil, err
+			}
+
+			if match {
+				filtered = append(filtered, pubKey)
+				// Stop after the first match so a key matched by several
+				// patterns is only added once.
+				break
+			}
+		}
+	}
+
+	return filtered, nil
+}
diff --git a/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go
new file mode 100644
index 0000000000000..a2df787dd994c
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+	"crypto"
+	_ "crypto/sha256" // Register SHA224 and SHA256
+	_ "crypto/sha512" // Register SHA384 and SHA512
+	"fmt"
+)
+
+type signatureAlgorithm struct {
+	algHeaderParam string
+	hashID         crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+	return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+	return h.hashID
+}
+
+var (
+	rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+	rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+	rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+	es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+	es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+	es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+	switch {
+	case alg == "RS256":
+		return rs256, nil
+	case alg == "RS384":
+		return rs384, nil
+	case alg == "RS512":
+		return rs512, nil
+	default:
+		return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+	}
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+	switch {
+	case hashID == crypto.SHA512:
+		return rs512
+	case hashID == crypto.SHA384:
+		return rs384
+	case hashID == crypto.SHA256:
+		fallthrough
+	default:
+		return rs256
+	}
+}
diff --git a/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go
new file mode 100644
index 0000000000000..cb2ca9a76904b
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/jsonsign.go
@@ -0,0 +1,657 @@
+package libtrust
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sort"
+	"time"
+	"unicode"
+)
+
+var (
+	// ErrInvalidSignContent is used when the content to be signed is invalid.
+	ErrInvalidSignContent = errors.New("invalid sign content")
+
+	// ErrInvalidJSONContent is used when invalid json is encountered.
+	ErrInvalidJSONContent = errors.New("invalid json content")
+
+	// ErrMissingSignatureKey is used when the specified signature key
+	// does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+
+			sigBytes, err := joseBase64UrlDecode(signature.Signature)
+			if err != nil {
+				return nil, err
+			}
+
+			err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+	}
+	return chains, nil
+}
+
+// JWS returns JSON serialized JWS according to
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
+func (js *JSONSignature) JWS() ([]byte, error) {
+	if len(js.signatures) == 0 {
+		return nil, errors.New("missing signature")
+	}
+
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	jsonMap := map[string]interface{}{
+		"payload":    js.payload,
+		"signatures": js.signatures,
+	}
+
+	return json.MarshalIndent(jsonMap, "", "   ")
+}
+
+func notSpace(r rune) bool {
+	return !unicode.IsSpace(r)
+}
+
+func detectJSONIndent(jsonContent []byte) (indent string) {
+	if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
+		quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
+		if quoteIndex > 0 {
+			indent = string(jsonContent[2 : quoteIndex+1])
+		}
+	}
+	return
+}
+
+type jsParsedHeader struct {
+	JWK       json.RawMessage `json:"jwk"`
+	Algorithm string          `json:"alg"`
+	Chain     []string        `json:"x5c"`
+}
+
+type jsParsedSignature struct {
+	Header    jsParsedHeader `json:"header"`
+	Signature string         `json:"signature"`
+	Protected string         `json:"protected"`
+}
+
+// ParseJWS parses a JWS serialized JSON object into a JSON signature.
+func ParseJWS(content []byte) (*JSONSignature, error) {
+	type jsParsed struct {
+		Payload    string              `json:"payload"`
+		Signatures []jsParsedSignature `json:"signatures"`
+	}
+	parsed := &jsParsed{}
+	err := json.Unmarshal(content, parsed)
+	if err != nil {
+		return nil, err
+	}
+	if len(parsed.Signatures) == 0 {
+		return nil, errors.New("missing signatures")
+	}
+	payload, err := joseBase64UrlDecode(parsed.Payload)
+	if err != nil {
+		return nil, err
+	}
+
+	js, err := NewJSONSignature(payload)
+	if err != nil {
+		return nil, err
+	}
+	js.signatures = make([]jsSignature, len(parsed.Signatures))
+	for i, signature := range parsed.Signatures {
+		header := jsHeader{
+			Algorithm: signature.Header.Algorithm,
+		}
+		if signature.Header.Chain != nil {
+			header.Chain = signature.Header.Chain
+		}
+		if signature.Header.JWK != nil {
+			publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
+			if err != nil {
+				return nil, err
+			}
+			header.JWK = publicKey
+		}
+		js.signatures[i] = jsSignature{
+			Header:    header,
+			Signature: signature.Signature,
+			Protected: signature.Protected,
+		}
+	}
+
+	return js, nil
+}
+
+// NewJSONSignature returns a new unsigned JWS from a json byte array.
+// JSONSignature will need to be signed before serializing or storing.
+// Optionally, one or more signatures can be provided as byte buffers,
+// containing serialized JWS signatures, to assemble a fully signed JWS
+// package. It is the caller's responsibility to ensure uniqueness of the
+// provided signatures.
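The counterpart flow is parse-then-verify. A sketch under the same caveats (`verifyJWS` is a hypothetical helper name; note that `Verify` deliberately skips x509 chains, which `VerifyChains` handles instead):

```go
package example

import "github.com/docker/libtrust"

// verifyJWS parses a serialized JWS (as produced by JSONSignature.JWS)
// and checks every signature, returning the IDs of the signing keys.
func verifyJWS(raw []byte) ([]string, error) {
	js, err := libtrust.ParseJWS(raw)
	if err != nil {
		return nil, err
	}
	keys, err := js.Verify() // x509 chains are not validated here
	if err != nil {
		return nil, err
	}
	ids := make([]string, len(keys))
	for i, k := range keys {
		ids[i] = k.KeyID()
	}
	return ids, nil
}
```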
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) {
+	var dataMap map[string]interface{}
+	err := json.Unmarshal(content, &dataMap)
+	if err != nil {
+		return nil, err
+	}
+
+	js := newJSONSignature()
+	js.indent = detectJSONIndent(content)
+
+	js.payload = joseBase64UrlEncode(content)
+
+	// Find trailing } and whitespace, put in protected header
+	closeIndex := bytes.LastIndexFunc(content, notSpace)
+	if content[closeIndex] != '}' {
+		return nil, ErrInvalidJSONContent
+	}
+	lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
+	if content[lastRuneIndex] == ',' {
+		return nil, ErrInvalidJSONContent
+	}
+	js.formatLength = lastRuneIndex + 1
+	js.formatTail = content[js.formatLength:]
+
+	if len(signatures) > 0 {
+		for _, signature := range signatures {
+			var parsedJSig jsParsedSignature
+
+			if err := json.Unmarshal(signature, &parsedJSig); err != nil {
+				return nil, err
+			}
+
+			// TODO(stevvooe): A lot of the code below is repeated in
+			// ParseJWS. It will require more refactoring to fix that.
+			jsig := jsSignature{
+				Header: jsHeader{
+					Algorithm: parsedJSig.Header.Algorithm,
+				},
+				Signature: parsedJSig.Signature,
+				Protected: parsedJSig.Protected,
+			}
+
+			if parsedJSig.Header.Chain != nil {
+				jsig.Header.Chain = parsedJSig.Header.Chain
+			}
+
+			if parsedJSig.Header.JWK != nil {
+				publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK))
+				if err != nil {
+					return nil, err
+				}
+				jsig.Header.JWK = publicKey
+			}
+
+			js.signatures = append(js.signatures, jsig)
+		}
+	}
+
+	return js, nil
+}
+
+// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
+// struct. JWS will need to be signed before serializing or storing.
+func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
+	// Note: only map[string]interface{} and the empty struct type pass
+	// this check; arbitrary struct types are rejected.
+	switch content.(type) {
+	case map[string]interface{}:
+	case struct{}:
+	default:
+		return nil, errors.New("invalid data type")
+	}
+
+	js := newJSONSignature()
+	js.indent = "   "
+
+	payload, err := json.MarshalIndent(content, "", js.indent)
+	if err != nil {
+		return nil, err
+	}
+	js.payload = joseBase64UrlEncode(payload)
+
+	// Remove '\n}' from formatted section, put in protected header
+	js.formatLength = len(payload) - 2
+	js.formatTail = payload[js.formatLength:]
+
+	return js, nil
+}
+
+func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
+	value, ok := m[key]
+	if !ok {
+		return 0, false
+	}
+	switch v := value.(type) {
+	case int:
+		return v, true
+	case float64:
+		return int(v), true
+	default:
+		return 0, false
+	}
+}
+
+func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
+	value, ok := m[key]
+	if !ok {
+		return "", false
+	}
+	v, ok = value.(string)
+	return
+}
+
+// ParsePrettySignature parses a formatted signature into a
+// JSON signature. If the signatures are missing the format information
+// an error is returned. The formatted signature must have been created by
+// the PrettySignature method.
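The "pretty" format embeds the signatures inside the signed document itself. A sketch of producing one (the payload and the `"signatures"` embedding key are illustrative assumptions; `PrettySignature` appears further below, and `ParsePrettySignature` must later be called with the same key):

```go
package example

import "github.com/docker/libtrust"

func signPretty(key libtrust.PrivateKey) ([]byte, error) {
	payload := map[string]interface{}{
		"schemaVersion": 1,
		"name":          "example/repo",
	}
	js, err := libtrust.NewJSONSignatureFromMap(payload)
	if err != nil {
		return nil, err
	}
	if err := js.Sign(key); err != nil {
		return nil, err
	}
	// Embed the signature blocks under the chosen top-level key.
	return js.PrettySignature("signatures")
}
```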
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
+	var contentMap map[string]json.RawMessage
+	err := json.Unmarshal(content, &contentMap)
+	if err != nil {
+		return nil, fmt.Errorf("error unmarshalling content: %s", err)
+	}
+	sigMessage, ok := contentMap[signatureKey]
+	if !ok {
+		return nil, ErrMissingSignatureKey
+	}
+
+	var signatureBlocks []jsParsedSignature
+	err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
+	if err != nil {
+		return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
+	}
+
+	js := newJSONSignature()
+	js.signatures = make([]jsSignature, len(signatureBlocks))
+
+	for i, signatureBlock := range signatureBlocks {
+		protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
+		if err != nil {
+			return nil, fmt.Errorf("base64 decode error: %s", err)
+		}
+		var protectedHeader map[string]interface{}
+		err = json.Unmarshal(protectedBytes, &protectedHeader)
+		if err != nil {
+			return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
+		}
+
+		formatLength, ok := readIntFromMap("formatLength", protectedHeader)
+		if !ok {
+			return nil, errors.New("missing formatted length")
+		}
+		encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
+		if !ok {
+			return nil, errors.New("missing formatted tail")
+		}
+		formatTail, err := joseBase64UrlDecode(encodedTail)
+		if err != nil {
+			return nil, fmt.Errorf("base64 decode error on tail: %s", err)
+		}
+		if js.formatLength == 0 {
+			js.formatLength = formatLength
+		} else if js.formatLength != formatLength {
+			return nil, errors.New("conflicting format length")
+		}
+		if len(js.formatTail) == 0 {
+			js.formatTail = formatTail
+		} else if !bytes.Equal(js.formatTail, formatTail) {
+			return nil, errors.New("conflicting format tail")
+		}
+
+		header := jsHeader{
+			Algorithm: signatureBlock.Header.Algorithm,
+			Chain:     signatureBlock.Header.Chain,
+		}
+		if signatureBlock.Header.JWK != nil {
+			publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
+			if err != nil {
+				return nil, fmt.Errorf("error unmarshalling public key: %s", err)
+			}
+			header.JWK = publicKey
+		}
+		js.signatures[i] = jsSignature{
+			Header:    header,
+			Signature: signatureBlock.Signature,
+			Protected: signatureBlock.Protected,
+		}
+	}
+	if js.formatLength > len(content) {
+		return nil, errors.New("invalid format length")
+	}
+	formatted := make([]byte, js.formatLength+len(js.formatTail))
+	copy(formatted, content[:js.formatLength])
+	copy(formatted[js.formatLength:], js.formatTail)
+	js.indent = detectJSONIndent(formatted)
+	js.payload = joseBase64UrlEncode(formatted)
+
+	return js, nil
+}
+
+// PrettySignature formats a json signature into an easy-to-read
+// single json serialized object.
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+	if len(js.signatures) == 0 {
+		return nil, errors.New("no signatures")
+	}
+	payload, err := joseBase64UrlDecode(js.payload)
+	if err != nil {
+		return nil, err
+	}
+	payload = payload[:js.formatLength]
+
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	var marshalled []byte
+	var marshallErr error
+	if js.indent != "" {
+		marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+	} else {
+		marshalled, marshallErr = json.Marshal(js.signatures)
+	}
+	if marshallErr != nil {
+		return nil, marshallErr
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+	buf.Write(payload)
+	buf.WriteByte(',')
+	if js.indent != "" {
+		buf.WriteByte('\n')
+		buf.WriteString(js.indent)
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\": ")
+		buf.Write(marshalled)
+		buf.WriteByte('\n')
+	} else {
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\":")
+		buf.Write(marshalled)
+	}
+	buf.WriteByte('}')
+
+	return buf.Bytes(), nil
+}
+
+// Signatures provides the signatures on this JWS as opaque blobs, sorted by
+// keyID. These blobs can be stored and reassembled with payloads. Internally,
+// they are simply marshaled json web signatures but implementations should
+// not rely on this.
+func (js *JSONSignature) Signatures() ([][]byte, error) {
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	var sb [][]byte
+	for _, jsig := range js.signatures {
+		p, err := json.Marshal(jsig)
+		if err != nil {
+			return nil, err
+		}
+
+		sb = append(sb, p)
+	}
+
+	return sb, nil
+}
+
+// Merge combines the signatures from one or more other signatures into the
+// method receiver. If the payloads differ for any argument, an error will be
+// returned and the receiver will not be modified.
+func (js *JSONSignature) Merge(others ...*JSONSignature) error {
+	merged := js.signatures
+	for _, other := range others {
+		if js.payload != other.payload {
+			return fmt.Errorf("payloads differ from merge target")
+		}
+		merged = append(merged, other.signatures...)
+	}
+
+	js.signatures = merged
+	return nil
+}
diff --git a/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go
new file mode 100644
index 0000000000000..73642db2a8b76
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+	// KeyType returns the key type for this key. For elliptic curve keys,
+	// this value should be "EC". For RSA keys, this value should be "RSA".
+	KeyType() string
+	// KeyID returns a distinct identifier which is unique to this Public Key.
+	// The format generated by this library is a base32 encoding of a 240 bit
+	// hash of the public key data divided into 12 groups like so:
+	//    ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+	KeyID() string
+	// Verify verifies the signature of the data in the io.Reader using this
+	// Public Key. The alg parameter should identify the digital signature
+	// algorithm which was used to produce the signature and should be
+	// supported by this public key. Returns a nil error if the signature
+	// is valid.
+	Verify(data io.Reader, alg string, signature []byte) error
+	// CryptoPublicKey returns the internal object which can be used as a
+	// crypto.PublicKey for use with other standard library operations. The type
+	// is either *rsa.PublicKey or *ecdsa.PublicKey
+	CryptoPublicKey() crypto.PublicKey
+	// These public keys can be serialized to the standard JSON encoding for
+	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+	// Algorithms.
+	MarshalJSON() ([]byte, error)
+	// These keys can also be serialized to the standard PEM encoding.
+	PEMBlock() (*pem.Block, error)
+	// The string representation of a key is its key type and ID.
+	String() string
+	AddExtendedField(string, interface{})
+	GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+	// A PrivateKey contains all fields and methods of a PublicKey of the
+	// same type. The MarshalJSON method also outputs the private key as a
+	// JSON Web Key, and the PEMBlock method outputs the private key as a
+	// PEM block.
+	PublicKey
+	// PublicKey returns the PublicKey associated with this PrivateKey.
+	PublicKey() PublicKey
+	// Sign signs the data read from the io.Reader using a signature algorithm
+	// supported by the private key. If the specified hashing algorithm is
+	// supported by this key, that hash function is used to generate the
+	// signature; otherwise the default hashing algorithm for this key is
+	// used. Returns the signature and identifier of the algorithm used.
+	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+	// CryptoPrivateKey returns the internal object which can be used as a
+	// crypto.PrivateKey for use with other standard library operations. The
+	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+	CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+	switch cryptoPublicKey := cryptoPublicKey.(type) {
+	case *ecdsa.PublicKey:
+		return fromECPublicKey(cryptoPublicKey)
+	case *rsa.PublicKey:
+		return fromRSAPublicKey(cryptoPublicKey), nil
+	default:
+		return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+	}
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+	switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+	case *ecdsa.PrivateKey:
+		return fromECPrivateKey(cryptoPrivateKey)
+	case *rsa.PrivateKey:
+		return fromRSAPrivateKey(cryptoPrivateKey), nil
+	default:
+		return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+	}
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
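A brief sketch of bridging a standard-library key into these interfaces; the freshly generated RSA key and the helper name are illustrative assumptions:

```go
package example

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/docker/libtrust"
)

func bridgeExample() error {
	// Illustrative only: a throwaway 2048-bit stdlib RSA key.
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return err
	}

	// Wrap it to gain JWK/PEM marshaling and stable key IDs.
	priv, err := libtrust.FromCryptoPrivateKey(rsaKey)
	if err != nil {
		return err
	}
	fmt.Println(priv.KeyType(), priv.KeyID())
	return nil
}
```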
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
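Since fabricating a literal JWK by hand is error-prone, a round trip is the easiest way to see `UnmarshalPublicKeyJWK` in action; the ephemeral EC key is again an illustrative assumption:

```go
package example

import "github.com/docker/libtrust"

func jwkRoundTrip() (libtrust.PublicKey, error) {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	// Serialize the public half as a JSON Web Key...
	jwkJSON, err := key.PublicKey().MarshalJSON()
	if err != nil {
		return nil, err
	}
	// ...and parse it back; the recovered key reports the same KeyID.
	return libtrust.UnmarshalPublicKeyJWK(jwkJSON)
}
```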
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go new file mode 100644 index 0000000000000..c526de5455bcf --- /dev/null +++ b/vendor/github.com/docker/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
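These loaders select the format purely by file extension. A save-and-reload sketch (the path is illustrative; `SaveKey`, defined just below, writes the matching encoding):

```go
package example

import "github.com/docker/libtrust"

func saveAndReload(key libtrust.PrivateKey) (libtrust.PrivateKey, error) {
	// A ".json" or ".jwk" suffix selects JWK; any other suffix selects PEM.
	if err := libtrust.SaveKey("trust-key.json", key); err != nil {
		return nil, err
	}
	return libtrust.LoadKeyFile("trust-key.json")
}
```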
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+		return []json.RawMessage{}, nil
+	}
+
+	keySet := jwkSet{}
+
+	err := json.Unmarshal(data, &keySet)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+	}
+
+	return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+	data, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a key to a key set
+func AddKeySetFile(filename string, key PublicKey) error {
+	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+		return addKeySetJSONFile(filename, key)
+	}
+
+	// Must be a PEM format file
+	return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+	encodedKey, err := json.Marshal(key)
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client key: %s", err)
+	}
+
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return err
+	}
+
+	rawEntries, err := loadJSONKeySetRaw(contents)
+	if err != nil {
+		return err
+	}
+
+	rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+	entriesWrapper := jwkSet{Keys: rawEntries}
+
+	encodedEntries, err := json.MarshalIndent(entriesWrapper, "", "   ")
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client keys: %s", err)
+	}
+
+	err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+	}
+
+	return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+	// Encode to PEM, open file for appending, write PEM.
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+	}
+	defer file.Close()
+
+	pemBlock, err := key.PEMBlock()
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted key: %s", err)
+	}
+
+	_, err = file.Write(pem.EncodeToMemory(pemBlock))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted keys file: %s", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go
new file mode 100644
index 0000000000000..9a98ae3574f17
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"sync"
+)
+
+// ClientKeyManager manages client keys on the filesystem
+type ClientKeyManager struct {
+	key        PrivateKey
+	clientFile string
+	clientDir  string
+
+	clientLock sync.RWMutex
+	clients    []PublicKey
+
+	configLock sync.Mutex
+	configs    []*tls.Config
+}
+
+// NewClientKeyManager loads a new manager from a set of key files,
+// managed by the given private key.
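A sketch of maintaining an authorized-keys bundle with the helpers above; the bundle path is an illustrative assumption:

```go
package example

import "github.com/docker/libtrust"

func authorizeClient(clientKey libtrust.PublicKey) ([]libtrust.PublicKey, error) {
	// ".json"/".jwk" bundles are JWK sets; other suffixes append PEM blocks.
	const bundle = "authorized-keys.json"
	if err := libtrust.AddKeySetFile(bundle, clientKey); err != nil {
		return nil, err
	}
	return libtrust.LoadKeySetFile(bundle)
}
```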
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA? 
+	if caPath != "" {
+		certPool := x509.NewCertPool()
+		file, err := ioutil.ReadFile(caPath)
+		if err != nil {
+			return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+		}
+		certPool.AppendCertsFromPEM(file)
+
+		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+		tlsConfig.ClientCAs = certPool
+	}
+
+	return tlsConfig, nil
+}
+
+func newTLSConfig() *tls.Config {
+	return &tls.Config{
+		NextProtos: []string{"http/1.1"},
+		// Avoid fallback on insecure SSL protocols
+		MinVersion: tls.VersionTLS10,
+	}
+}
+
+// parseAddr parses an address into an array of IPs and domains
+func parseAddr(addr string) ([]net.IP, []string, error) {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, nil, err
+	}
+	var domains []string
+	var ips []net.IP
+	ip := net.ParseIP(host)
+	if ip != nil {
+		ips = []net.IP{ip}
+	} else {
+		domains = []string{host}
+	}
+	return ips, domains, nil
+}
diff --git a/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go
new file mode 100644
index 0000000000000..dac4cacf20ef1
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+/*
+ * RSA DSA PUBLIC KEY
+ */
+
+// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
+type rsaPublicKey struct {
+	*rsa.PublicKey
+	extended map[string]interface{}
+}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+	return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
+// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
+func (k *rsaPublicKey) KeyType() string {
+	return "RSA"
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string {
+	return keyIDFromCryptoKey(k)
+}
+
+func (k *rsaPublicKey) String() string {
+	return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+	// Verify the signature of the given data; return a non-nil error if invalid.
+	sigAlg, err := rsaSignatureAlgorithmByName(alg)
+	if err != nil {
+		return fmt.Errorf("unable to verify Signature: %s", err)
+	}
+
+	hasher := sigAlg.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+	if err != nil {
+		return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+	}
+
+	return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is either *rsa.PublicKey or *ecdsa.PublicKey
+func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
+	return k.PublicKey
+}
+
+func (k *rsaPublicKey) toMap() map[string]interface{} {
+	jwk := make(map[string]interface{})
+	for k, v := range k.extended {
+		jwk[k] = v
+	}
+	jwk["kty"] = k.KeyType()
+	jwk["kid"] = k.KeyID()
+	jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
+	jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
+
+	return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
+	derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
+	}
+	k.extended["kid"] = k.KeyID() // For display purposes.
+	return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
+	k.extended[field] = value
+}
+
+func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
+	v, ok := k.extended[field]
+	if !ok {
+		return nil
+	}
+	return v
+}
+
+func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
+	// JWK key type (kty) has already been determined to be "RSA".
+	// Need to extract 'n', 'e', and 'kid' and check for
+	// consistency.
+
+	// Get the modulus parameter N.
+	nB64Url, err := stringFromMap(jwk, "n")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+	}
+
+	n, err := parseRSAModulusParam(nB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+	}
+
+	// Get the public exponent E.
+	eB64Url, err := stringFromMap(jwk, "e")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+	}
+
+	e, err := parseRSAPublicExponentParam(eB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+	}
+
+	key := &rsaPublicKey{
+		PublicKey: &rsa.PublicKey{N: n, E: e},
+	}
+
+	// Key ID is optional, but if it exists, it should match the key.
+	_, ok := jwk["kid"]
+	if ok {
+		kid, err := stringFromMap(jwk, "kid")
+		if err != nil {
+			return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
+		}
+		if kid != key.KeyID() {
+			return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
+		}
+	}
+
+	if _, ok := jwk["d"]; ok {
+		return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
+	}
+
+	key.extended = jwk
+
+	return key, nil
+}
+
+/*
+ * RSA DSA PRIVATE KEY
+ */
+
+// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
+type rsaPrivateKey struct {
+	rsaPublicKey
+	*rsa.PrivateKey
+}
+
+func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
+	return &rsaPrivateKey{
+		*fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
+		cryptoPrivateKey,
+	}
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *rsaPrivateKey) PublicKey() PublicKey {
+	return &k.rsaPublicKey
+}
+
+func (k *rsaPrivateKey) String() string {
+	return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the RSA private key. If the specified hashing algorithm is supported by
+// this key, that hash function is used to generate the signature; otherwise
+// the default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+	hasher := sigAlg.HashID().New()
+
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+
+	alg = sigAlg.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+	k.Precompute() // Make sure the precomputed values are stored.
+	jwk := k.rsaPublicKey.toMap()
+
+	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+	otherPrimes := k.Primes[2:]
+
+	if len(otherPrimes) > 0 {
+		otherPrimesInfo := make([]interface{}, len(otherPrimes))
+		for i, r := range otherPrimes {
+			otherPrimeInfo := make(map[string]string, 3)
+			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+			crtVal := k.Precomputed.CRTValues[i]
+			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+			otherPrimesInfo[i] = otherPrimeInfo
+		}
+		jwk["oth"] = otherPrimesInfo
+	}
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+	// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+	// only the private key exponent 'd' is REQUIRED, the others are just for
+	// signature/decryption optimizations and SHOULD be included when the JWK
+	// is produced. We MAY choose to accept a JWK which only includes 'd', but
+	// we're going to go ahead and not choose to accept it without the extra
+	// fields. Only the 'oth' field will be optional (for multi-prime keys).
+	privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+	}
+	firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+	}
+
+	var oth interface{}
+	if _, ok := jwk["oth"]; ok {
+		oth = jwk["oth"]
+		delete(jwk, "oth")
+	}
+
+	// JWK key type (kty) has already been determined to be "RSA".
+	// Need to extract the public key information, then extract the private
+	// key values.
+	publicKey, err := rsaPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	privateKey := &rsa.PrivateKey{
+		PublicKey: *publicKey.PublicKey,
+		D:         privateExponent,
+		Primes:    []*big.Int{firstPrimeFactor, secondPrimeFactor},
+		Precomputed: rsa.PrecomputedValues{
+			Dp:   firstFactorCRT,
+			Dq:   secondFactorCRT,
+			Qinv: crtCoeff,
+		},
+	}
+
+	if oth != nil {
+		// Should be an array of more JSON objects.
+		otherPrimesInfo, ok := oth.([]interface{})
+		if !ok {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+		}
+		numOtherPrimeFactors := len(otherPrimesInfo)
+		if numOtherPrimeFactors == 0 {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty")
+		}
+		otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+		productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+		crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+		for i, val := range otherPrimesInfo {
+			otherPrimeinfo, ok := val.(map[string]interface{})
+			if !ok {
+				return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+			}
+
+			otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+			}
+			otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+			}
+			otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+			}
+
+			// Take a pointer so the values land in crtValues[i] rather
+			// than in a discarded copy.
+			crtValue := &crtValues[i]
+			crtValue.Exp = otherFactorCRT
+			crtValue.Coeff = otherCrtCoeff
+			crtValue.R = productOfPrimes
+			otherPrimeFactors[i] = otherPrimeFactor
+			productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+		}
+
+		privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+		privateKey.Precomputed.CRTValues = crtValues
+	}
+
+	key := &rsaPrivateKey{
+		rsaPublicKey: *publicKey,
+		PrivateKey:   privateKey,
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go new file mode 100644 index 0000000000000..a5a101d3f1175 --- /dev/null +++ b/vendor/github.com/docker/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
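`LoadOrCreateTrustKey` is the usual entry point for a daemon's persistent identity key. A minimal sketch, with an illustrative path:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/libtrust"
)

func main() {
	// On first run this creates the directory, generates an EC P-256 key,
	// and saves it (plus a "public-" copy) at the illustrative path below.
	key, err := libtrust.LoadOrCreateTrustKey(filepath.Join(".docker", "key.json"))
	if err != nil {
		panic(err)
	}
	fmt.Println("trust key:", key.KeyID())
}
```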
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
+	// E is supposed to be 65537 for performance and security reasons
+	// and is what golang's rsa package generates, but it might be
+	// different if imported from some other generator.
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(e))
+	var i int
+	for i = 0; i < len(buf)-1; i++ {
+		if buf[i] != 0 {
+			break
+		}
+	}
+	return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+	eBytes, err := joseBase64UrlDecode(eB64Url)
+	if err != nil {
+		return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	// Only the minimum number of bytes were used to represent E, but
+	// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+	// to add zero padding if necessary.
+	byteLen := len(eBytes)
+	buf := make([]byte, 4-byteLen, 4)
+	eBytes = append(buf, eBytes...)
+
+	return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+	b64Url, err := stringFromMap(m, key)
+	if err != nil {
+		return nil, err
+	}
+
+	paramBytes, err := joseBase64UrlDecode(b64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+	pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+	for k, v := range headers {
+		switch val := v.(type) {
+		case string:
+			pemBlock.Headers[k] = val
+		case []string:
+			if k == "hosts" {
+				pemBlock.Headers[k] = strings.Join(val, ",")
+			} else {
+				// TODO: return an error for this non-encodable type
+			}
+		default:
+			// TODO: return an error for this non-encodable type
+		}
+	}
+
+	return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+	cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+	}
+
+	pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addPEMHeadersToKey(pemBlock, pubKey)
+
+	return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+	for key, value := range pemBlock.Headers {
+		var safeVal interface{}
+		if key == "hosts" {
+			safeVal = strings.Split(value, ",")
+		} else {
+			safeVal = value
+		}
+		pubKey.AddExtendedField(key, safeVal)
+	}
+}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
new file mode 100644
index 0000000000000..14127cd831ec1
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
@@ -0,0 +1,9 @@
+(The MIT License)
+
+Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
new file mode 100644
index 0000000000000..09a4a35c9bb74
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
@@ -0,0 +1,42 @@
+# Windows Terminal Sequences
+
+This library allows enabling Windows terminal color support for Go.
+
+See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
+
+## Usage
+
+```go
+import (
+	"syscall"
+
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func main() {
+	sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
+}
+
+```
+
+## Authors
+
+The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
+
+We thank all the authors who provided code to this library:
+
+* Felix Kollmann
+* Nicolas Perraut
+* @dirty49374
+
+## License
+
+(The MIT License)
+
+Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
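Before moving on to the next vendored package, a brief illustration of the minimal-octet exponent encoding that libtrust's `serializeRSAPublicExponentParam` and `parseRSAPublicExponentParam` (vendored above) implement. This is a minimal, self-contained sketch for illustration only; the helpers below re-implement the logic because the vendored functions are unexported, and they are not part of the patch itself:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeExponent big-endian encodes e and strips leading zero octets,
// mirroring serializeRSAPublicExponentParam: the JWK "e" parameter
// must use the minimum number of octets.
func encodeExponent(e int) []byte {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(e))
	i := 0
	for i < 3 && buf[i] == 0 {
		i++
	}
	return buf[i:]
}

// decodeExponent left-pads back to four octets before decoding,
// mirroring parseRSAPublicExponentParam, since binary.BigEndian.Uint32
// expects exactly four bytes. Assumes b holds 1-4 octets.
func decodeExponent(b []byte) int {
	padded := make([]byte, 4-len(b), 4)
	padded = append(padded, b...)
	return int(binary.BigEndian.Uint32(padded))
}

func main() {
	e := 65537 // the usual RSA public exponent (F4 = 0x10001)
	enc := encodeExponent(e)
	fmt.Printf("%d -> %x -> %d\n", e, enc, decodeExponent(enc))
	// Output: 65537 -> 010001 -> 65537
}
```

The round trip shows why the parse side re-pads: 65537 serializes to just three octets (`01 00 01`), so the decoder must restore the fourth, leading zero octet before calling `binary.BigEndian.Uint32`.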
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
new file mode 100644
index 0000000000000..716c6131256fb
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
@@ -0,0 +1 @@
+module github.com/konsorten/go-windows-terminal-sequences
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
new file mode 100644
index 0000000000000..57f530ae83f6e
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
@@ -0,0 +1,35 @@
+// +build windows
+
+package sequences
+
+import (
+	"syscall"
+)
+
+var (
+	kernel32Dll    *syscall.LazyDLL  = syscall.NewLazyDLL("Kernel32.dll")
+	setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
+)
+
+func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
+	const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
+
+	var mode uint32
+	err := syscall.GetConsoleMode(stream, &mode)
+	if err != nil {
+		return err
+	}
+
+	if enable {
+		mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	} else {
+		mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	}
+
+	ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode))
+	if ret == 0 {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
new file mode 100644
index 0000000000000..df61a6f2f6fe5
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
@@ -0,0 +1,11 @@
+// +build linux darwin
+
+package sequences
+
+import (
+	"fmt"
+)
+
+func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error {
+	return fmt.Errorf("windows only package")
+}
diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap
new file mode 100644
index 0000000000000..eaf8b2f9e6c42
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/.mailmap
@@ -0,0 +1,4 @@
+Aaron Lehmann
+Derek McGowan
+Stephen J Day
+Haibing Zhou
diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
new file mode 100644
index 0000000000000..b6165f83ca738
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
@@ -0,0 +1,28 @@
+version: 2
+
+requirements:
+  signed_off_by:
+    required: true
+
+always_pending:
+  title_regex: '^WIP'
+  explanation: 'Work in progress...'
+ +group_defaults: + required: 2 + approve_by_comment: + enabled: true + approve_regex: '^LGTM' + reject_regex: '^Rejected' + reset_on_push: + enabled: true + author_approval: + ignored: true + conditions: + branches: + - master + +groups: + go-digest: + teams: + - go-digest-maintainers diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml new file mode 100644 index 0000000000000..5775f885c1e97 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.12.x + - 1.13.x + - master diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md new file mode 100644 index 0000000000000..e4d962ac16e53 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md @@ -0,0 +1,72 @@ +# Contributing to Docker open source projects + +Want to hack on this project? Awesome! Here are instructions to get you started. + +This project is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE b/vendor/github.com/opencontainers/go-digest/LICENSE new file mode 100644 index 0000000000000..3ac8ab64872ed --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2019, 2020 OCI Contributors + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs new file mode 100644 index 0000000000000..e26cd4fc8ed91 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. 
A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. 
Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. 
Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. 
if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. 
Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS new file mode 100644 index 0000000000000..843b1b20618ba --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/MAINTAINERS @@ -0,0 +1,5 @@ +Derek McGowan (@dmcgowan) +Stephen Day (@stevvooe) +Vincent Batts (@vbatts) +Akihiro Suda (@AkihiroSuda) +Sebastiaan van Stijn (@thaJeztah) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md new file mode 100644 index 0000000000000..a11287207edcf --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/README.md @@ -0,0 +1,96 @@ +# go-digest + +[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) + +Common digest package used across the container ecosystem. 
+
+Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information.
+
+# What is a digest?
+
+A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function).
+
+The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems:
+
+```go
+id := digest.FromBytes([]byte("my content"))
+```
+
+In the example above, the id can be used to uniquely identify the byte slice "my content".
+This allows two disparate applications to agree on a verifiable identifier without having to trust one another.
+
+An identifying digest can be verified, as follows:
+
+```go
+if id != digest.FromBytes([]byte("my content")) {
+	return errors.New("the content has changed!")
+}
+```
+
+A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense:
+
+```go
+rd := getContent()
+verifier := id.Verifier()
+io.Copy(verifier, rd)
+
+if !verifier.Verified() {
+	return errors.New("the content has changed!")
+}
+```
+
+Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system.
+
+# Usage
+
+While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package.
+
+1. Make sure to import the hash implementations into your application or the package will panic.
+   You should have something like the following in the main (or other entrypoint) of your application:
+
+   ```go
+   import (
+   	_ "crypto/sha256"
+   	_ "crypto/sha512"
+   )
+   ```
+   This may seem inconvenient but it allows you to replace the hash
+   implementations with others, such as https://github.com/stevvooe/resumable.
+
+2. Even though `digest.Digest` may be assemblable as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input.
+   While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application.
+
+3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests.
+
+# Stability
+
+The Go API, at this stage, is considered stable, unless otherwise noted.
+
+As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest).
+
+# Contributing
+
+This package is considered fairly complete.
+It has been in production in thousands (millions?) of deployments and is fairly battle-hardened.
+New additions will be met with skepticism.
+If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR.
+
+## Code of Conduct
+
+Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct].
+
+## Security
+
+If you find an issue, please follow the [security][security] protocol to report it.
+
+# Copyright and license
+
+Copyright © 2019, 2020 OCI Contributors
+Copyright © 2016 Docker, Inc.
+All rights reserved, except as follows.
+Code is released under the [Apache 2.0 license](LICENSE).
+This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs).
+You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+
+[security]: https://github.com/opencontainers/org/blob/master/security
+[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md
diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go
new file mode 100644
index 0000000000000..490951dc3f293
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/algorithm.go
@@ -0,0 +1,193 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+	"crypto"
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+	SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
+	SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
+	SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
+
+	// Canonical is the primary digest algorithm used with the distribution
+	// project. Other digests may be used but this one is the primary storage
+	// digest.
+	Canonical = SHA256
+)
+
+var (
+	// TODO(stevvooe): Follow the pattern of the standard crypto package for
+	// registration of digests. Effectively, we are a registerable set and
+	// common symbol access.
+
+	// algorithms maps values to hash.Hash implementations. Other algorithms
+	// may be available but they cannot be calculated by the digest package.
+	algorithms = map[Algorithm]crypto.Hash{
+		SHA256: crypto.SHA256,
+		SHA384: crypto.SHA384,
+		SHA512: crypto.SHA512,
+	}
+
+	// anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
+	// Note that uppercase A-F is disallowed.
+	anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
+		SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
+		SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
+		SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
+	}
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, Digester and Hash will return nil.
+func (a Algorithm) Available() bool {
+	h, ok := algorithms[a]
+	if !ok {
+		return false
+	}
+
+	// check availability of the hash, as well
+	return h.Available()
+}
+
+func (a Algorithm) String() string {
+	return string(a)
+}
+
+// Size returns the number of bytes returned by the hash.
+func (a Algorithm) Size() int {
+	h, ok := algorithms[a]
+	if !ok {
+		return 0
+	}
+	return h.Size()
+}
+
+// Set implemented to allow use of Algorithm as a command line flag.
+func (a *Algorithm) Set(value string) error {
+	if value == "" {
+		*a = Canonical
+	} else {
+		// just do a type conversion, support is queried with Available.
+		*a = Algorithm(value)
+	}
+
+	if !a.Available() {
+		return ErrDigestUnsupported
+	}
+
+	return nil
+}
+
+// Digester returns a new digester for the specified algorithm. If the algorithm
+// does not have a digester implementation, nil will be returned. This can be
+// checked by calling Available before calling Digester.
+func (a Algorithm) Digester() Digester {
+	return &digester{
+		alg:  a,
+		hash: a.Hash(),
+	}
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
+func (a Algorithm) Hash() hash.Hash {
+	if !a.Available() {
+		// Empty algorithm string is invalid
+		if a == "" {
+			panic("empty digest algorithm, validate before calling Algorithm.Hash()")
+		}
+
+		// NOTE(stevvooe): A missing hash is usually a programming error that
+		// must be resolved at compile time. We don't import in the digest
+		// package to allow users to choose their hash implementation (such as
+		// when using stevvooe/resumable or a hardware accelerated package).
+		//
+		// Applications that may want to resolve the hash at runtime should
+		// call Algorithm.Available before calling Algorithm.Hash().
+		panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
+	}
+
+	return algorithms[a].New()
+}
+
+// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
+// the encoded portion of the digest.
+func (a Algorithm) Encode(d []byte) string {
+	// TODO(stevvooe): Currently, all algorithms use a hex encoding. When we
+	// add support for back registration, we can modify this accordingly.
+	return fmt.Sprintf("%x", d)
+}
+
+// FromReader returns the digest of the reader using the algorithm.
+func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
+	digester := a.Digester()
+
+	if _, err := io.Copy(digester.Hash(), rd); err != nil {
+		return "", err
+	}
+
+	return digester.Digest(), nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+	digester := a.Digester()
+
+	if _, err := digester.Hash().Write(p); err != nil {
+		// Writes to a Hash should never fail. None of the existing
+		// hash implementations in the stdlib or hashes vendored
+		// here can return errors from Write. Having a panic in this
+		// condition instead of having FromBytes return an error value
+		// avoids unnecessary error handling paths in all callers.
+		panic("write to hash function returned error: " + err.Error())
+	}
+
+	return digester.Digest()
+}
+
+// FromString digests the string input and returns a Digest.
+func (a Algorithm) FromString(s string) Digest {
+	return a.FromBytes([]byte(s))
+}
+
+// Validate validates the encoded portion of a digest string.
+func (a Algorithm) Validate(encoded string) error {
+	r, ok := anchoredEncodedRegexps[a]
+	if !ok {
+		return ErrDigestUnsupported
+	}
+	// Digests must always be hex-encoded, ensuring that their hex portion will
+	// always be size*2
+	if a.Size()*2 != len(encoded) {
+		return ErrDigestInvalidLength
+	}
+	if r.MatchString(encoded) {
+		return nil
+	}
+	return ErrDigestInvalidFormat
+}
diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go
new file mode 100644
index 0000000000000..518b5e7154563
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/digest.go
@@ -0,0 +1,157 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// Digest allows simple protection of hex-formatted digest strings, prefixed
+// by their algorithm. Strings of type Digest have some guarantee of being in
+// the correct format, and the type provides quick access to the components of
+// a digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This makes it possible to abstract the digest behind this type and work
+// only in those terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+	return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions. This is also useful for rebuilding digests from binary
+// serializations.
+func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
+	return NewDigestFromEncoded(alg, alg.Encode(p))
+}
+
+// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded.
+func NewDigestFromHex(alg, hex string) Digest {
+	return NewDigestFromEncoded(Algorithm(alg), hex)
+}
+
+// NewDigestFromEncoded returns a Digest from alg and the encoded digest.
+func NewDigestFromEncoded(alg Algorithm, encoded string) Digest {
+	return Digest(fmt.Sprintf("%s:%s", alg, encoded))
+}
+
+// DigestRegexp matches valid digest types.
+var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`)
+
+// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
+var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+
+var (
+	// ErrDigestInvalidFormat returned when digest format invalid.
+	ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
+
+	// ErrDigestInvalidLength returned when digest has invalid length.
+	ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
+
+	// ErrDigestUnsupported returned when the digest algorithm is unsupported.
+	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
+)
+
+// Parse parses s and returns the validated digest object. An error will
+// be returned if the format is invalid.
+func Parse(s string) (Digest, error) {
+	d := Digest(s)
+	return d, d.Validate()
+}
+
+// FromReader consumes the content of rd until io.EOF, returning canonical digest.
+func FromReader(rd io.Reader) (Digest, error) {
+	return Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) Digest {
+	return Canonical.FromBytes(p)
+}
+
+// FromString digests the input and returns a Digest.
+func FromString(s string) Digest {
+	return Canonical.FromString(s)
+}
+
+// Validate checks that the content of d is a valid digest, returning an
+// error if not.
+func (d Digest) Validate() error { + s := string(d) + i := strings.Index(s, ":") + if i <= 0 || i+1 == len(s) { + return ErrDigestInvalidFormat + } + algorithm, encoded := Algorithm(s[:i]), s[i+1:] + if !algorithm.Available() { + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat + } + return ErrDigestUnsupported + } + return algorithm.Validate(encoded) +} + +// Algorithm returns the algorithm portion of the digest. This will panic if +// the underlying digest is not in a valid format. +func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) +} + +// Verifier returns a writer object that can be used to verify a stream of +// content against the digest. If the digest is invalid, the method will panic. +func (d Digest) Verifier() Verifier { + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + } +} + +// Encoded returns the encoded portion of the digest. This will panic if the +// underlying digest is not in a valid format. +func (d Digest) Encoded() string { + return string(d[d.sepIndex()+1:]) +} + +// Hex is deprecated. Please use Digest.Encoded. +func (d Digest) Hex() string { + return d.Encoded() +} + +func (d Digest) String() string { + return string(d) +} + +func (d Digest) sepIndex() int { + i := strings.Index(string(d), ":") + + if i < 0 { + panic(fmt.Sprintf("no ':' separator in digest %q", d)) + } + + return i +} diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go new file mode 100644 index 0000000000000..ede9077571150 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/digester.go @@ -0,0 +1,40 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import "hash" + +// Digester calculates the digest of written data. Writes should go directly +// to the return value of Hash, while calling Digest will return the current +// value of the digest. +type Digester interface { + Hash() hash.Hash // provides direct access to underlying hash instance. + Digest() Digest +} + +// digester provides a simple digester definition that embeds a hasher. +type digester struct { + alg Algorithm + hash hash.Hash +} + +func (d *digester) Hash() hash.Hash { + return d.hash +} + +func (d *digester) Digest() Digest { + return NewDigest(d.alg, d.hash) +} diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go new file mode 100644 index 0000000000000..83d3a936ca6a2 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/doc.go @@ -0,0 +1,62 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package digest provides a generalized type to opaquely represent message
+// digests and their operations within the registry. The Digest type is
+// designed to serve as a flexible identifier in a content-addressable system.
+// More importantly, it provides tools and wrappers to work with
+// hash.Hash-based digests with little effort.
+//
+// Basics
+//
+// The format of a digest is simply a string with two parts, dubbed the
+// "algorithm" and the "digest", separated by a colon:
+//
+// 	<algorithm>:<digest>
+//
+// An example of a sha256 digest representation follows:
+//
+// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// The "algorithm" portion defines both the hashing algorithm used to calculate
+// the digest and the encoding of the resulting digest, which defaults to "hex"
+// if not otherwise specified. Currently, all supported algorithms have their
+// digests encoded in hex strings.
+//
+// In the example above, the string "sha256" is the algorithm and the hex bytes
+// are the "digest".
+//
+// Because the Digest type is simply a string, once a valid Digest is
+// obtained, comparisons are cheap, quick and simple to express with the
+// standard equality operator.
+//
+// Verification
+//
+// The main benefit of using the Digest type is simple verification against a
+// given digest. The Verifier interface, modeled after the stdlib hash.Hash
+// interface, provides a common write sink for digest verification. After
+// writing is complete, calling the Verifier.Verified method will indicate
+// whether or not the stream of bytes matches the target digest.
+//
+// Missing Features
+//
+// In addition to the above, we intend to add the following features to this
+// package:
+//
+// 1. A Digester type that supports write sink digest calculation.
+//
+// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
+//
+package digest
diff --git a/vendor/github.com/opencontainers/go-digest/go.mod b/vendor/github.com/opencontainers/go-digest/go.mod
new file mode 100644
index 0000000000000..cf5d7b1d2d78a
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/go.mod
@@ -0,0 +1,3 @@
+module github.com/opencontainers/go-digest
+
+go 1.13
diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go
new file mode 100644
index 0000000000000..afef506f4649f
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/verifiers.go
@@ -0,0 +1,46 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import ( + "hash" + "io" +) + +// Verifier presents a general verification interface to be used with message +// digests and other byte stream verifications. Users instantiate a Verifier +// from one of the various methods, write the data under test to it then check +// the result with the Verified method. +type Verifier interface { + io.Writer + + // Verified will return true if the content written to Verifier matches + // the digest. + Verified() bool +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE new file mode 100644 index 0000000000000..9fdc20fdb6a80 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go new file mode 100644 index 0000000000000..35d8108958ff0 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -0,0 +1,56 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. 
+	AnnotationURL = "org.opencontainers.image.url"
+
+	// AnnotationDocumentation is the annotation key for the URL to get documentation on the image.
+	AnnotationDocumentation = "org.opencontainers.image.documentation"
+
+	// AnnotationSource is the annotation key for the URL to get source code for building the image.
+	AnnotationSource = "org.opencontainers.image.source"
+
+	// AnnotationVersion is the annotation key for the version of the packaged software.
+	// The version MAY match a label or tag in the source code repository.
+	// The version MAY be Semantic versioning-compatible.
+	AnnotationVersion = "org.opencontainers.image.version"
+
+	// AnnotationRevision is the annotation key for the source control revision identifier for the packaged software.
+	AnnotationRevision = "org.opencontainers.image.revision"
+
+	// AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual.
+	AnnotationVendor = "org.opencontainers.image.vendor"
+
+	// AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression.
+	AnnotationLicenses = "org.opencontainers.image.licenses"
+
+	// AnnotationRefName is the annotation key for the name of the reference for a target.
+	// SHOULD only be considered valid when on descriptors on `index.json` within image layout.
+	AnnotationRefName = "org.opencontainers.image.ref.name"
+
+	// AnnotationTitle is the annotation key for the human-readable title of the image.
+	AnnotationTitle = "org.opencontainers.image.title"
+
+	// AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image.
+	AnnotationDescription = "org.opencontainers.image.description"
+)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
new file mode 100644
index 0000000000000..fe799bd698c71
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
@@ -0,0 +1,103 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	"time"
+
+	digest "github.com/opencontainers/go-digest"
+)
+
+// ImageConfig defines the execution parameters which should be used as a base when running a container using an image.
+type ImageConfig struct {
+	// User defines the username or UID which the process in the container should run as.
+	User string `json:"User,omitempty"`
+
+	// ExposedPorts is a set of ports to expose from a container running this image.
+	ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
+
+	// Env is a list of environment variables to be used in a container.
+	Env []string `json:"Env,omitempty"`
+
+	// Entrypoint defines a list of arguments to use as the command to execute when the container starts.
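+	// (Roughly the Dockerfile ENTRYPOINT; Cmd below supplies the default arguments.)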
+	Entrypoint []string `json:"Entrypoint,omitempty"`
+
+	// Cmd defines the default arguments to the entrypoint of the container.
+	Cmd []string `json:"Cmd,omitempty"`
+
+	// Volumes is a set of directories describing where the process is likely to write data specific to a container instance.
+	Volumes map[string]struct{} `json:"Volumes,omitempty"`
+
+	// WorkingDir sets the current working directory of the entrypoint process in the container.
+	WorkingDir string `json:"WorkingDir,omitempty"`
+
+	// Labels contains arbitrary metadata for the container.
+	Labels map[string]string `json:"Labels,omitempty"`
+
+	// StopSignal contains the system call signal that will be sent to the container to exit.
+	StopSignal string `json:"StopSignal,omitempty"`
+}
+
+// RootFS describes the layer content addresses.
+type RootFS struct {
+	// Type is the type of the rootfs.
+	Type string `json:"type"`
+
+	// DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
+	DiffIDs []digest.Digest `json:"diff_ids"`
+}
+
+// History describes the history of a layer.
+type History struct {
+	// Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6.
+	Created *time.Time `json:"created,omitempty"`
+
+	// CreatedBy is the command which created the layer.
+	CreatedBy string `json:"created_by,omitempty"`
+
+	// Author is the author of the build point.
+	Author string `json:"author,omitempty"`
+
+	// Comment is a custom message set when creating the layer.
+	Comment string `json:"comment,omitempty"`
+
+	// EmptyLayer is used to mark if the history item created a filesystem diff.
+	EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Image is the JSON structure which describes some basic information about the image.
+// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
+type Image struct {
+	// Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6.
+	Created *time.Time `json:"created,omitempty"`
+
+	// Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
+	Author string `json:"author,omitempty"`
+
+	// Architecture is the CPU architecture which the binaries in this image are built to run on.
+	Architecture string `json:"architecture"`
+
+	// OS is the name of the operating system which the image is built to run on.
+	OS string `json:"os"`
+
+	// Config defines the execution parameters which should be used as a base when running a container using the image.
+	Config ImageConfig `json:"config,omitempty"`
+
+	// RootFS references the layer content addresses used by the image.
+	RootFS RootFS `json:"rootfs"`
+
+	// History describes the history of each layer.
+	History []History `json:"history,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
new file mode 100644
index 0000000000000..6e442a0853f4b
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
@@ -0,0 +1,64 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import digest "github.com/opencontainers/go-digest" + +// Descriptor describes the disposition of targeted content. +// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype +// when marshalled to JSON. +type Descriptor struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType,omitempty"` + + // Digest is the digest of the targeted content. + Digest digest.Digest `json:"digest"` + + // Size specifies the size in bytes of the blob. + Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // + // This should only be used when referring to a manifest. + Platform *Platform `json:"platform,omitempty"` +} + +// Platform describes the platform which the image in the manifest runs on. +type Platform struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `v7` to specify ARMv7 when architecture is `arm`. + Variant string `json:"variant,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go new file mode 100644 index 0000000000000..4e6c4b236237d --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. 
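+//
+// A minimal index, for illustration only (digest elided, values hypothetical):
+//
+// 	{
+// 	  "schemaVersion": 2,
+// 	  "manifests": [{
+// 	    "mediaType": "application/vnd.oci.image.manifest.v1+json",
+// 	    "digest": "sha256:...",
+// 	    "size": 7143,
+// 	    "platform": {"architecture": "amd64", "os": "linux"}
+// 	  }]
+// 	}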
+type Index struct { + specs.Versioned + + // Manifests references platform specific manifests. + Manifests []Descriptor `json:"manifests"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go new file mode 100644 index 0000000000000..fc79e9e0d140f --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // ImageLayoutFile is the file name of oci image layout file + ImageLayoutFile = "oci-layout" + // ImageLayoutVersion is the version of ImageLayout + ImageLayoutVersion = "1.0.0" +) + +// ImageLayout is the structure in the "oci-layout" file, found in the root +// of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go new file mode 100644 index 0000000000000..7ff32c40ba305 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. +type Manifest struct { + specs.Versioned + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. + Config Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []Descriptor `json:"layers"` + + // Annotations contains arbitrary metadata for the image manifest. 
+ Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go new file mode 100644 index 0000000000000..bad7bb97f4734 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -0,0 +1,48 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // MediaTypeDescriptor specifies the media type for a content descriptor. + MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" + + // MediaTypeLayoutHeader specifies the media type for the oci-layout. + MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + + // MediaTypeImageManifest specifies the media type for an image manifest. + MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" + + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + + // MediaTypeImageLayer is the media type used for layers referenced by the manifest. + MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" + + // MediaTypeImageLayerGzip is the media type used for gzipped layers + // referenced by the manifest. + MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + + // MediaTypeImageLayerNonDistributable is the media type for layers referenced by + // the manifest but with distribution restrictions. + MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + // MediaTypeImageLayerNonDistributableGzip is the media type for + // gzipped layers referenced by the manifest but with distribution + // restrictions. + MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + + // MediaTypeImageConfig specifies the media type for the image configuration. + MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" +) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go new file mode 100644 index 0000000000000..5d493df23300e --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package specs
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes
+	VersionMajor = 1
+	// VersionMinor is for functionality added in a backwards-compatible manner
+	VersionMinor = 0
+	// VersionPatch is for backwards-compatible bug fixes
+	VersionPatch = 1
+
+	// VersionDev indicates the development branch. Releases will be an empty string.
+	VersionDev = ""
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
new file mode 100644
index 0000000000000..58a1510f33e94
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package specs
+
+// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// Incoming content with unknown schema version can be decoded against this
+// struct to check the version.
+type Versioned struct {
+	// SchemaVersion is the image manifest schema that this image follows
+	SchemaVersion int `json:"schemaVersion"`
+}
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
new file mode 100644
index 0000000000000..6b7d7d1e8b90b
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -0,0 +1,2 @@
+logrus
+vendor
diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml
new file mode 100644
index 0000000000000..65dc2850377dc
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.golangci.yml
@@ -0,0 +1,40 @@
+run:
+  # do not run on test files yet
+  tests: false
+
+# all available settings of specific linters
+linters-settings:
+  errcheck:
+    # report about not checking errors in type assertions: `a := b.(MyStruct)`;
+    # default is false: such cases aren't reported by default.
+    check-type-assertions: false
+
+    # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
+    # default is false: such cases aren't reported by default.
+    check-blank: false
+
+  lll:
+    line-length: 100
+    tab-width: 4
+
+  prealloc:
+    simple: false
+    range-loops: false
+    for-loops: false
+
+  whitespace:
+    multi-if: false   # Enforces newlines (or comments) after every multi-line if statement
+    multi-func: false # Enforces newlines (or comments) after every multi-line function signature
+
+linters:
+  enable:
+    - megacheck
+    - govet
+  disable:
+    - maligned
+    - prealloc
+  disable-all: false
+  presets:
+    - bugs
+    - unused
+  fast: false
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
new file mode 100644
index 0000000000000..5e20aa4140c32
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+go_import_path: github.com/sirupsen/logrus
+git:
+  depth: 1
+env:
+  - GO111MODULE=on
+go: [1.13.x, 1.14.x]
+os: [linux, osx]
+install:
+  - ./travis/install.sh
+script:
+  - ./travis/cross_build.sh
+  - ./travis/lint.sh
+  - export GOMAXPROCS=4
+  - export GORACE=halt_on_error=1
+  - go test -race -v ./...
+  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000000000..584026d67caa9
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,223 @@
+# 1.6.0
+Fixes:
+  * end of line cleanup
+  * revert the entry concurrency bug fix which leads to deadlock under some circumstances
+  * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14
+
+Features:
+  * add an option to the `TextFormatter` to completely disable fields quoting
+
+# 1.5.0
+Code quality:
+  * add golangci linter run on travis
+
+Fixes:
+  * add mutex for hooks concurrent access on `Entry` data
+  * caller function field for go1.14
+  * fix build issue for gopherjs target
+
+Feature:
+  * add a hooks/writer sub-package whose goal is to split output to different streams depending on the trace level
+  * add a `DisableHTMLEscape` option in the `JSONFormatter`
+  * add `ForceQuote` and `PadLevelText` options in the `TextFormatter`
+
+# 1.4.2
+  * Fixes build break for plan9, nacl, solaris
+# 1.4.1
+This new release introduces:
+  * Enhance TextFormatter to not print caller information when it is empty (#944)
+  * Remove dependency on golang.org/x/crypto (#932, #943)
+
+Fixes:
+  * Fix Entry.WithContext method to return a copy of the initial entry (#941)
+
+# 1.4.0
+This new release introduces:
+  * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
+  * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
+  * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
+
+Fixes:
+  * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
+  * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
+  * Fix infinite recursion on unknown `Level.String()` (#907)
+  * Fix race condition in `getCaller` (#916).
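+
+As an illustrative sketch of the 1.4.0 context API above (usage assumed from
+the notes, not taken from the upstream changelog): a context attached with
+`WithContext` travels on the entry, where hooks can read it back from
+`entry.Context`.
+
+```go
+package main
+
+import (
+	"context"
+
+	log "github.com/sirupsen/logrus"
+)
+
+type ctxKey string
+
+func main() {
+	// Attach a request-scoped context to the log entry; a custom hook
+	// could later read values back via entry.Context.
+	ctx := context.WithValue(context.Background(), ctxKey("request_id"), "abc123")
+	log.WithContext(ctx).Info("handling request")
+}
+```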
+
+
+# 1.3.0
+This new release introduces:
+  * Log, Logf, Logln functions for Logger and Entry that take a Level
+
+Fixes:
+  * Building prometheus node_exporter on AIX (#840)
+  * Race condition in TextFormatter (#468)
+  * Travis CI import path (#868)
+  * Remove coloured output on Windows (#862)
+  * Pointer to func as field in JSONFormatter (#870)
+  * Properly marshal Levels (#873)
+
+# 1.2.0
+This new release introduces:
+  * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
+  * A new trace level named `Trace` whose level is below `Debug`
+  * A configurable exit function to be called upon a Fatal trace
+  * The `Level` object now implements `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+  * fix the build break on Solaris
+  * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized
+
+# 1.1.0
+This new release introduces:
+  * several fixes:
+    * a fix for a race condition on entry formatting
+    * proper cleanup of previously used entries before putting them back in the pool
+    * the extra new line at the end of message in text formatter has been removed
+  * a new global public API to check if a level is activated: IsLevelEnabled
+  * the following methods have been added to the Logger object
+    * IsLevelEnabled
+    * SetFormatter
+    * SetOutput
+    * ReplaceHooks
+  * introduction of go module
+  * an indent configuration for the json formatter
+  * output colour support for windows
+  * the field sort function is now configurable for text formatter
+  * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formatter
+
+# 1.0.6
+
+This new release introduces:
+  * a new API, WithTime, which makes it easy to force the time of the log entry;
+    this is mostly useful for logger wrappers
+  * a fix reverting the immutability of the entry given as parameter to the hooks
+  * a new configuration field of the json formatter in order to put all the fields
+    in a nested dictionary
+  * a new SetOutput method in the Logger
+  * a new configuration of the text formatter to configure the name of the default keys
+  * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that
generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE new file mode 100644 index 0000000000000..f090cb42f370b --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md new file mode 100644 index 0000000000000..5796706dbfa24 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -0,0 +1,513 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. + +**Logrus is in maintenance-mode.** We will not be introducing new features. 
It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your Logging library (again...). + +This does not mean Logrus is dead. Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. Many fantastic alternatives have sprung +up. Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. + +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + +**Seeing weird case-sensitive problems?** It's in the past been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. 
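+	// (For instance, Out, Formatter and Level are plain fields on this
+	// logger instance, where the package-level API uses the Set* functions.)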
+	log.Out = os.Stdout
+
+	// You could set this to any `io.Writer` such as a file
+	// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+	// if err == nil {
+	//  log.Out = file
+	// } else {
+	//  log.Info("Failed to log to file, using default stderr")
+	// }
+
+	log.WithFields(logrus.Fields{
+		"animal": "walrus",
+		"size":   10,
+	}).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+  logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: the syslog hook also supports connecting to local syslog (e.g. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
+
+
+#### Level logging
+
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
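+
+For illustration, a level name coming from a flag or environment variable can
+be converted with `logrus.ParseLevel`, which is case-insensitive (see the
+0.10.0 changelog entry):
+
+```go
+lvl, err := logrus.ParseLevel("debug")
+if err != nil {
+	lvl = logrus.InfoLevel // fall back to the default on an unknown name
+}
+logrus.SetLevel(lvl)
+```
+
+Each level also has a corresponding logging method, shown below: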
+
+```go
+log.Trace("Something very low level.")
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY set the
+    `DisableColors` field to `true`. For Windows, see
+    [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+  * When colors are enabled, levels are truncated to 4 characters by default. To disable
+    truncation set the `DisableLevelTruncation` field to `true`.
+  * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior by adding padding to the level text.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+
+Third party logging formatters:
+
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook).
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment, you
+could do:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY, set the
+    `DisableColors` field to `true`. For Windows, see
+    [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+  * When colors are enabled, levels are truncated to 4 characters by default. To disable
+    truncation, set the `DisableLevelTruncation` field to `true`.
+  * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+
+Third party logging formatters:
+
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply with Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays the log entry source along with an alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo.
+* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
+* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). Gets the file name, line number and calling function name when printing a log; saves logs to files.
+* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). Logrus message JSON formatter with a human-readable caption added.
+
+You can define your own formatter by implementing the `Formatter` interface,
+which requires a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see the Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func init() {
+  log.SetFormatter(new(MyJSONFormatter))
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is one end of an
+`io.Pipe`, and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+  // create a stdlib log.Logger that writes to
+  // logrus.Logger.
+  ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
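+
+The package also ships a `WriterLevel` variant (in `writer.go`, which is not
+shown in this part of the diff) for emitting at a level other than `info`; a
+minimal sketch, assuming that helper is present in this version:
+
+```go
+w := logger.WriterLevel(logrus.ErrorLevel)
+defer w.Close()
+
+srv := http.Server{
+  // the server's error log is now emitted through logrus at error level
+  ErrorLog: log.New(w, "", 0),
+}
+```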
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can initialize a logger's level, hook and formatter from a config file, and the logger will be generated with different configs in different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps it with spf13/Viper to load configuration with fangs, and simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages.
+This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+import (
+  "github.com/sirupsen/logrus"
+  "github.com/sirupsen/logrus/hooks/test"
+  "github.com/stretchr/testify/assert"
+  "testing"
+)
+
+func TestSomething(t *testing.T) {
+  logger, hook := test.NewNullLogger()
+  logger.Error("Hello error")
+
+  assert.Equal(t, 1, len(hook.Entries))
+  assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+  assert.Equal(t, "Hello error", hook.LastEntry().Message)
+
+  hook.Reset()
+  assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which
+can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be
+intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, the Logger is protected by a mutex for concurrent writes. The mutex
+is held when calling hooks and writing logs. If you are sure such locking is not
+needed, you can call `logger.SetNoLock()` to disable the locking.
+
+Situations where locking is not needed include:
+
+* You have no hooks registered, or calling the hooks is already thread-safe.
+
+* Writing to `logger.Out` is already thread-safe, for example:
+
+  1) `logger.Out` is protected by locks.
+
+  2) `logger.Out` is an `os.File` opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
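+
+A minimal sketch of opting out of the lock under the conditions above (the
+file name is hypothetical):
+
+```go
+logger := logrus.New()
+
+// Append mode keeps sub-4k writes atomic on Linux.
+file, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+if err == nil {
+  logger.Out = file
+  // No hooks are registered and Out is append-only, so drop the mutex.
+  logger.SetNoLock()
+}
+```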
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000000000..8fd189e1ccae8
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,76 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka .
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler appends a Logrus Exit handler to the list of handlers;
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
+
+// DeferExitHandler prepends a Logrus Exit handler to the list of handlers;
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func DeferExitHandler(handler func()) {
+	handlers = append([]func(){handler}, handlers...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
new file mode 100644
index 0000000000000..df9d65c3a5bb9
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+  GOPATH: c:\gopath
+branches:
+  only:
+    - master
+install:
+  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+  - go version
+build_script:
+  - go get -t
+  - go test
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 0000000000000..da67aba06debf
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+ + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go new file mode 100644 index 0000000000000..f6e062a3466c5 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -0,0 +1,426 @@ +package logrus + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + bufferPool *sync.Pool + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), + } +} + +// Returns the bytes representation of this entry from the formatter. +func (entry *Entry) Bytes() ([]byte, error) { + return entry.Logger.Formatter.Format(entry) +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Bytes() + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a context to the Entry. 
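+// The context is stored on the returned Entry (see Entry.Context) for use by
+// hooks; it is not added to the entry's Data fields.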
+func (entry *Entry) WithContext(ctx context.Context) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + fieldErr := entry.err + for k, v := range fields { + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch t.Kind() { + case reflect.Func: + isErrField = true + case reflect.Ptr: + isErrField = t.Elem().Kind() == reflect.Func + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} +} + +// Overrides the time of the Entry. +func (entry *Entry) WithTime(t time.Time) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, maximumCallerDepth) + _ = runtime.Callers(0, pcs) + + // dynamic get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage = getPackageName(funcName) + break + } + } + + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f //nolint:scopelint + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + // Default to now, but allow users to override if they want. + // + // We don't have to worry about polluting future calls to Entry#log() + // with this assignment because this function is declared with a + // non-pointer receiver. 
+ if entry.Time.IsZero() { + entry.Time = time.Now() + } + + entry.Level = level + entry.Message = msg + entry.Logger.mu.Lock() + if entry.Logger.ReportCaller { + entry.Caller = getCaller() + } + entry.Logger.mu.Unlock() + + entry.fireHooks() + + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + + entry.write() + + entry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) fireHooks() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + err := entry.Logger.Hooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + return + } + if _, err = entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } +} + +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + entry.Log(InfoLevel, args...) +} + +func (entry *Entry) Warn(args ...interface{}) { + entry.Log(WarnLevel, args...) +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + entry.Log(ErrorLevel, args...) +} + +func (entry *Entry) Fatal(args ...interface{}) { + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + entry.Log(PanicLevel, args...) + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + entry.Logf(InfoLevel, format, args...) +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + entry.Logf(WarnLevel, format, args...) +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + entry.Logf(ErrorLevel, format, args...) +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + entry.Logf(PanicLevel, format, args...) 
+} + +// Entry Println family functions + +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + +func (entry *Entry) Infoln(args ...interface{}) { + entry.Logln(InfoLevel, args...) +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + entry.Logln(WarnLevel, args...) +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + entry.Logln(ErrorLevel, args...) +} + +func (entry *Entry) Fatalln(args ...interface{}) { + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + entry.Logln(PanicLevel, args...) +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go new file mode 100644 index 0000000000000..42b04f6c80944 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -0,0 +1,225 @@ +package logrus + +import ( + "context" + "io" + "time" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.SetOutput(out) +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.AddHook(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. 
+func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// WithTime creates an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + +// Debugln logs a message at level Debug on the standard logger. 
+func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go new file mode 100644 index 0000000000000..408883773ebbd --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -0,0 +1,78 @@ +package logrus + +import "time" + +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) + } + + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) + } + + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. 
+ if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod new file mode 100644 index 0000000000000..d41329679f83f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/go.mod @@ -0,0 +1,11 @@ +module github.com/sirupsen/logrus + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.3 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/sys v0.0.0-20190422165155-953cdadca894 +) + +go 1.13 diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum new file mode 100644 index 0000000000000..49c690f238376 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/go.sum @@ -0,0 +1,12 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go new file mode 100644 index 0000000000000..3f151cdc39275 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. 
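+// Firing stops at the first hook that returns an error; that error is passed
+// back to the caller (entry.fireHooks reports it on stderr).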
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000000000..ba7f237112bdd --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,125 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "fmt" + "runtime" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // DisableHTMLEscape allows disabling html escaping in output + DisableHTMLEscape bool + + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", + // }, + // } + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from json fields. 
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + + // PrettyPrint will indent all json logs + PrettyPrint bool +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + encoder.SetEscapeHTML(!f.DisableHTMLEscape) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err) + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go new file mode 100644 index 0000000000000..6fdda748e4db2 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -0,0 +1,352 @@ +package logrus + +import ( + "context" + "io" + "os" + "sync" + "sync/atomic" + "time" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventurous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. 
Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc +} + +type exitFunc func(int) + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &logrus.Logger{ +// Out: os.Stderr, +// Formatter: new(logrus.JSONFormatter), +// Hooks: make(logrus.LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} + logger.entryPool.Put(entry) +} + +// WithField allocates a new entry and adds a field to it. +// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to +// this new returned entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logf(level, format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + logger.Logf(InfoLevel, format, args...) +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + logger.Logf(WarnLevel, format, args...) 
+} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + logger.Warnf(format, args...) +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + logger.Logf(ErrorLevel, format, args...) +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + logger.Logf(PanicLevel, format, args...) +} + +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) +} + +func (logger *Logger) Debug(args ...interface{}) { + logger.Log(DebugLevel, args...) +} + +func (logger *Logger) Info(args ...interface{}) { + logger.Log(InfoLevel, args...) +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Print(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + logger.Log(WarnLevel, args...) +} + +func (logger *Logger) Warning(args ...interface{}) { + logger.Warn(args...) +} + +func (logger *Logger) Error(args ...interface{}) { + logger.Log(ErrorLevel, args...) +} + +func (logger *Logger) Fatal(args ...interface{}) { + logger.Log(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + logger.Log(PanicLevel, args...) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logln(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Traceln(args ...interface{}) { + logger.Logln(TraceLevel, args...) +} + +func (logger *Logger) Debugln(args ...interface{}) { + logger.Logln(DebugLevel, args...) +} + +func (logger *Logger) Infoln(args ...interface{}) { + logger.Logln(InfoLevel, args...) +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + logger.Logln(WarnLevel, args...) +} + +func (logger *Logger) Warningln(args ...interface{}) { + logger.Warnln(args...) +} + +func (logger *Logger) Errorln(args ...interface{}) { + logger.Logln(ErrorLevel, args...) +} + +func (logger *Logger) Fatalln(args ...interface{}) { + logger.Logln(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + logger.Logln(PanicLevel, args...) +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit + } + logger.ExitFunc(code) +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +// SetLevel sets the logger level. +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. 
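+// It is safe for concurrent use: the logger's mutex is held while the hook
+// list is modified.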
+func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. +func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go new file mode 100644 index 0000000000000..2f16224cb9ffa --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -0,0 +1,186 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" + } +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + case "trace": + return TraceLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = l + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, + TraceLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the + // logging level is set to Panic. 
+ FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel + // TraceLevel level. Designates finer-grained informational events than the Debug. + TraceLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. +type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool +} + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is +// here for consistancy. Do not use. Use Logger or Entry instead. 
+type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 0000000000000..2403de981929f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 0000000000000..499789984d2bf --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 0000000000000..ebdae3ec62629 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,7 @@ +// +build js + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go new file mode 100644 index 0000000000000..97af92c68ea2d --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go @@ -0,0 +1,11 @@ +// +build js nacl plan9 + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 0000000000000..3293fb3caad45 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,17 @@ +// +build !appengine,!js,!windows,!nacl,!plan9 + +package logrus + +import ( + "io" + "os" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return isTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go new file mode 100644 index 0000000000000..f6710b3bd0b04 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go @@ -0,0 +1,11 @@ +package logrus + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
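+// Solaris exposes the older termio ioctl (TCGETA) rather than termios, which
+// is why this check differs from the Linux and BSD variants.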
+func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 0000000000000..cc4fe6e317769 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 0000000000000..572889db21629 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,34 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" + + sequences "github.com/konsorten/go-windows-terminal-sequences" +) + +func initTerminal(w io.Writer) { + switch v := w.(type) { + case *os.File: + sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) + } +} + +func checkIfTerminal(w io.Writer) bool { + var ret bool + switch v := w.(type) { + case *os.File: + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode) + ret = (err == nil) + default: + ret = false + } + if ret { + initTerminal(w) + } + return ret +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000000000..3c28b54cabae8 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -0,0 +1,334 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + red = 31 + yellow = 33 + blue = 36 + gray = 37 +) + +var baseTimestamp time.Time + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Force quoting of all values + ForceQuote bool + + // DisableQuote disables quoting for all values. + // DisableQuote will have a lower priority than ForceQuote. + // If both of them are set to true, quote will be forced on all values. + DisableQuote bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. 
+ DisableLevelTruncation bool + + // PadLevelText Adds padding the level text so that all the levels output at the same length + // PadLevelText is a superset of the DisableLevelTruncation option + PadLevelText bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + + terminalInitOnce sync.Once + + // The max length of the level text, generated dynamically on init + levelTextMaxLength int +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + } + // Get the max length of the level text + for _, level := range AllLevels { + levelTextLength := utf8.RuneCount([]byte(level.String())) + if levelTextLength > f.levelTextMaxLength { + f.levelTextMaxLength = levelTextLength + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { + case ok && force != "0": + isColored = true + case ok && force == "0", os.Getenv("CLICOLOR") == "0": + isColored = false + } + } + + return isColored && !f.DisableColors +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) 
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) + } else { + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation && !f.PadLevelText { + levelText = levelText[0:4] + } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. + formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + switch { + case f.DisableTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + case !f.FullTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + default: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + if f.DisableQuote { + return false + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go new file mode 100644 index 0000000000000..72e8e3a1b65f1 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -0,0 +1,70 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +// Writer at INFO level. See WriterLevel for details. +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +// WriterLevel returns an io.Writer that can be used to write arbitrary text to +// the logger at the given log level. Each line written to the writer will be +// printed in the usual way using formatters and hooks. The writer is part of an +// io.Pipe and it is the callers responsibility to close the writer when done. +// This can be used to override the standard library logger easily. +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case TraceLevel: + printFunc = entry.Trace + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 66779fc23a90b..2296c61af8395 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -210,6 +210,22 @@ github.com/dgryski/go-rendezvous ## explicit github.com/dlclark/regexp2 github.com/dlclark/regexp2/syntax +# github.com/docker/distribution v2.7.1+incompatible +## explicit +github.com/docker/distribution +github.com/docker/distribution/configuration +github.com/docker/distribution/context +github.com/docker/distribution/digestset +github.com/docker/distribution/notifications +github.com/docker/distribution/reference +github.com/docker/distribution/registry/auth +github.com/docker/distribution/registry/auth/token +github.com/docker/distribution/uuid +# github.com/docker/go-metrics v0.0.1 +## explicit +# github.com/docker/libtrust 
v0.0.0-20160708172513-aabc10ec26b7 +## explicit +github.com/docker/libtrust # github.com/dsnet/compress v0.0.1 github.com/dsnet/compress github.com/dsnet/compress/bzip2 @@ -522,6 +538,8 @@ github.com/klauspost/cpuid/v2 # github.com/klauspost/pgzip v1.2.5 ## explicit github.com/klauspost/pgzip +# github.com/konsorten/go-windows-terminal-sequences v1.0.3 +github.com/konsorten/go-windows-terminal-sequences # github.com/kr/pretty v0.2.1 github.com/kr/pretty # github.com/kr/text v0.2.0 @@ -655,6 +673,13 @@ github.com/oliamb/cutter github.com/olivere/elastic/v7 github.com/olivere/elastic/v7/config github.com/olivere/elastic/v7/uritemplates +# github.com/opencontainers/go-digest v1.0.0 +## explicit +github.com/opencontainers/go-digest +# github.com/opencontainers/image-spec v1.0.1 +## explicit +github.com/opencontainers/image-spec/specs-go +github.com/opencontainers/image-spec/specs-go/v1 # github.com/pelletier/go-toml v1.8.1 ## explicit github.com/pelletier/go-toml @@ -715,6 +740,8 @@ github.com/shurcooL/httpfs/vfsutil # github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 ## explicit github.com/shurcooL/vfsgen +# github.com/sirupsen/logrus v1.6.0 +github.com/sirupsen/logrus # github.com/spf13/afero v1.5.1 ## explicit github.com/spf13/afero From cb934f1668c39eb95b724d9aebd37a8234135a7c Mon Sep 17 00:00:00 2001 From: a1012112796 <1012112796@qq.com> Date: Mon, 8 Mar 2021 15:05:35 +0800 Subject: [PATCH 2/8] Update routers/repo/package.go Co-authored-by: techknowlogick --- routers/repo/package.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/routers/repo/package.go b/routers/repo/package.go index a24856f639067..987807f989be7 100644 --- a/routers/repo/package.go +++ b/routers/repo/package.go @@ -26,7 +26,7 @@ import ( "github.com/docker/distribution/registry/auth/token" ) -// DockerPluginLogin tocken service for docker registry +// DockerPluginLogin token service for docker registry func DockerPluginLogin(ctx *context.Context) { if !setting.HasDockerPlugin() { ctx.Status(404) From 95552336418ff4caa7b4184bde33a6c72b52053b Mon Sep 17 00:00:00 2001 From: a1012112796 <1012112796@qq.com> Date: Mon, 8 Mar 2021 16:04:19 +0800 Subject: [PATCH 3/8] use decoder --- routers/repo/package.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/routers/repo/package.go b/routers/repo/package.go index 987807f989be7..c99468345a9be 100644 --- a/routers/repo/package.go +++ b/routers/repo/package.go @@ -8,7 +8,6 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" "math/rand" "net/http" "regexp" @@ -328,14 +327,12 @@ func DockerPluginEvent(ctx *context.Context) { return } - body, err := ioutil.ReadAll(ctx.Req.Body) - if err != nil { - ctx.Error(http.StatusInternalServerError) - log.Error("Failed to read event: %v", err) - } + decoder := json.NewDecoder(ctx.Req.Body) + decoder.DisallowUnknownFields() + data := new(notifications.Envelope) - err = json.Unmarshal(body, data) + err := decoder.Decode(data) if err != nil { ctx.Error(http.StatusInternalServerError) log.Error("Failed to unmarshal event: %v", err) From 47e2a11e43657cddee2cd8b36f7ed9782973fced Mon Sep 17 00:00:00 2001 From: a1012112796 <1012112796@qq.com> Date: Mon, 8 Mar 2021 18:00:45 +0800 Subject: [PATCH 4/8] split code --- modules/packages/docker/auth.go | 257 +++++++++++++++++++ modules/packages/docker/event_hook.go | 80 ++++++ 
routers/repo/package.go | 342 ++------------------------ 3 files changed, 361 insertions(+), 318 deletions(-) create mode 100644 modules/packages/docker/auth.go create mode 100644 modules/packages/docker/event_hook.go diff --git a/modules/packages/docker/auth.go b/modules/packages/docker/auth.go new file mode 100644 index 0000000000000..f4edf4392a5f4 --- /dev/null +++ b/modules/packages/docker/auth.go @@ -0,0 +1,257 @@ +// Copyright 2021 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "math/rand" + "regexp" + "sort" + "strings" + "time" + + "code.gitea.io/gitea/models" + "github.com/docker/distribution/registry/auth/token" + "github.com/docker/libtrust" +) + +// AuthzResult auth result +type AuthzResult struct { + Scope AuthScope + AutorizedActions []string +} + +// AuthScope auth scope +type AuthScope struct { + Type string + Class string + Name string + Actions []string +} + +// GenerateTokenOptions options to generate a token +type GenerateTokenOptions struct { + Account string + IssuerName string + AuthzResults []AuthzResult + PublicKey *libtrust.PublicKey + PrivateKey *libtrust.PrivateKey + ServiceName string + Expiration int64 +} + +// GenerateToken generate token +func GenerateToken(opts GenerateTokenOptions) (string, error) { + now := time.Now().Unix() + + // Sign something dummy to find out which algorithm is used. + _, sigAlg, err := (*opts.PrivateKey).Sign(strings.NewReader("dummy"), 0) + if err != nil { + return "", fmt.Errorf("failed to sign: %s", err) + } + header := token.Header{ + Type: "JWT", + SigningAlg: sigAlg, + KeyID: (*opts.PublicKey).KeyID(), + } + headerJSON, err := json.Marshal(header) + if err != nil { + return "", fmt.Errorf("failed to marshal header: %s", err) + } + + claims := token.ClaimSet{ + Issuer: opts.IssuerName, + Subject: opts.Account, + Audience: opts.ServiceName, + NotBefore: now - 10, + IssuedAt: now, + Expiration: now + opts.Expiration, + JWTID: fmt.Sprintf("%d", rand.Int63()), + Access: []*token.ResourceActions{}, + } + for _, a := range opts.AuthzResults { + ra := &token.ResourceActions{ + Type: a.Scope.Type, + Name: a.Scope.Name, + Actions: a.AutorizedActions, + } + if ra.Actions == nil { + ra.Actions = []string{} + } + sort.Strings(ra.Actions) + claims.Access = append(claims.Access, ra) + } + claimsJSON, err := json.Marshal(claims) + if err != nil { + return "", fmt.Errorf("failed to marshal claims: %s", err) + } + + payload := fmt.Sprintf("%s%s%s", joseBase64UrlEncode(headerJSON), token.TokenSeparator, joseBase64UrlEncode(claimsJSON)) + + sig, sigAlg2, err := (*opts.PrivateKey).Sign(strings.NewReader(payload), 0) + if err != nil || sigAlg2 != sigAlg { + return "", fmt.Errorf("failed to sign token: %s", err) + } + return fmt.Sprintf("%s%s%s", payload, token.TokenSeparator, joseBase64UrlEncode(sig)), nil +} + +// Copy-pasted from libtrust where it is private. 
+func joseBase64UrlEncode(b []byte) string {
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// PermissionCheck checks which of the requested actions the doer is authorized to perform on each scope
+func PermissionCheck(doer *models.User, scopes []AuthScope) ([]AuthzResult, error) {
+	rs := make([]AuthzResult, 0, len(scopes))
+	repos := make(map[string]*models.Repository)
+	repoPerms := make(map[int64]models.Permission)
+
+	for _, scope := range scopes {
+		hasPush := false
+		hasPull := false
+		needCreate := false
+		for _, act := range scope.Actions {
+			if act == "push" {
+				hasPush = true
+				continue
+			}
+			if act == "pull" {
+				hasPull = true
+			}
+		}
+		if hasPush && doer == nil {
+			continue
+		}
+		splits := strings.SplitN(scope.Name, "/", 3)
+		var (
+			owner    string
+			repoName string
+			image    string
+		)
+		if len(splits) != 3 {
+			continue
+		}
+		owner = splits[0]
+		repoName = splits[1]
+		image = splits[2]
+
+		repo, has := repos[owner+"/"+repoName]
+		var err error
+		if !has {
+			repo, err = models.GetRepositoryByOwnerAndName(owner, repoName)
+			if err != nil {
+				if models.IsErrRepoNotExist(err) {
+					continue
+				}
+				return nil, err
+			}
+			repos[owner+"/"+repoName] = repo
+		}
+
+		_, err = models.GetPackage(repo.ID, models.PackageTypeDockerImage, image)
+		if err != nil {
+			if !models.IsErrPackageNotExist(err) {
+				return nil, err
+			}
+			needCreate = true
+		}
+
+		perm, has := repoPerms[repo.ID]
+		if !has {
+			perm, err = models.GetUserRepoPermission(repo, doer)
+			if err != nil {
+				return nil, err
+			}
+			repoPerms[repo.ID] = perm
+		}
+
+		events := make([]string, 0, 2)
+		accessMode := models.AccessModeRead
+		if hasPush {
+			accessMode = models.AccessModeWrite
+			if needCreate {
+				accessMode = models.AccessModeAdmin
+			}
+		}
+		if perm.CanAccess(accessMode, models.UnitTypePackages) {
+			if hasPush {
+				events = append(events, "push")
+			}
+			if hasPull {
+				events = append(events, "pull")
+			}
+		}
+
+		if len(events) == 0 {
+			continue
+		}
+
+		rs = append(rs, AuthzResult{
+			Scope:            scope,
+			AutorizedActions: events,
+		})
+	}
+
+	return rs, nil
+}
+
+var resourceTypeRegex = regexp.MustCompile(`([a-z0-9]+)(\([a-z0-9]+\))?`)
+
+// parseResourceType parses the resource type and optional class from a scope
+func parseResourceType(scope string) (string, string, error) {
+	parts := resourceTypeRegex.FindStringSubmatch(scope)
+	if parts == nil {
+		return "", "", fmt.Errorf("malformed scope request")
+	}
+
+	switch len(parts) {
+	case 3:
+		return parts[1], "", nil
+	case 4:
+		return parts[1], parts[3], nil
+	default:
+		return "", "", fmt.Errorf("malformed scope request")
+	}
+}
+
+// SplitScopes parses a space-separated list of scope strings into auth scopes
+func SplitScopes(scope string) ([]AuthScope, error) {
+	scopes := strings.Split(scope, " ")
+	rs := make([]AuthScope, 0, len(scopes))
+	for _, scopeStr := range scopes {
+		parts := strings.Split(scopeStr, ":")
+		var scope AuthScope
+
+		scopeType, scopeClass, err := parseResourceType(parts[0])
+		if err != nil {
+			return nil, err
+		}
+
+		switch len(parts) {
+		case 3:
+			scope = AuthScope{
+				Type:    scopeType,
+				Class:   scopeClass,
+				Name:    parts[1],
+				Actions: strings.Split(parts[2], ","),
+			}
+		case 4:
+			scope = AuthScope{
+				Type:    scopeType,
+				Class:   scopeClass,
+				Name:    parts[1] + ":" + parts[2],
+				Actions: strings.Split(parts[3], ","),
+			}
+		default:
+			return nil, fmt.Errorf("invalid scope: %q", scopeStr)
+		}
+		sort.Strings(scope.Actions)
+		rs = append(rs, scope)
+	}
+
+	return rs, nil
+}
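
The scope strings parsed above follow the Docker token-auth format `resourcetype:name:actions`, where the name may itself contain a colon (for example a registry host with a port), which is why SplitScopes distinguishes the three- and four-part cases. The following is a minimal sketch of how two registry scope requests map onto AuthScope values, assuming the module path code.gitea.io/gitea/modules/packages/docker from this patch; the repository names are placeholders.

package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/packages/docker"
)

func main() {
	// Two scopes as a registry would request them: a plain repository
	// scope, and one whose name carries a host:port prefix.
	scopes, err := docker.SplitScopes(
		"repository:gitea/myrepo/myimage:push,pull " +
			"repository:registry.example.com:5000/gitea/myrepo/myimage:pull")
	if err != nil {
		panic(err)
	}
	for _, s := range scopes {
		// e.g. type=repository name=gitea/myrepo/myimage actions=[pull push]
		fmt.Printf("type=%s name=%s actions=%v\n", s.Type, s.Name, s.Actions)
	}
}

Note that the actions come back sorted, because SplitScopes sorts them before returning; PermissionCheck relies on plain string comparison against "push" and "pull".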
diff --git a/modules/packages/docker/event_hook.go b/modules/packages/docker/event_hook.go
new file mode 100644
index 0000000000000..86c23d0f5390c
--- /dev/null
+++ b/modules/packages/docker/event_hook.go
@@ -0,0 +1,80 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"strings"
+
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/timeutil"
+	"github.com/docker/distribution/notifications"
+)
+
+// HandleEvents handles push events sent by the registry's notification endpoint
+func HandleEvents(data *notifications.Envelope) error {
+	repos := make(map[string]*models.Repository)
+	pkgs := make(map[string]*models.Package)
+	for _, event := range data.Events {
+		if event.Action == notifications.EventActionPush {
+			var (
+				owner    string
+				repoName string
+				image    string
+				err      error
+			)
+			splits := strings.SplitN(event.Target.Repository, "/", 3)
+			if len(splits) != 3 {
+				continue
+			}
+			owner = splits[0]
+			repoName = splits[1]
+			image = splits[2]
+
+			repo, has := repos[owner+"/"+repoName]
+			if !has {
+				repo, err = models.GetRepositoryByOwnerAndName(owner, repoName)
+				if err != nil {
+					if models.IsErrRepoNotExist(err) {
+						continue
+					}
+					return err
+				}
+				repos[owner+"/"+repoName] = repo
+			}
+
+			pkg, has := pkgs[event.Target.Repository]
+			if !has {
+				pkg, err = models.GetPackage(repo.ID, models.PackageTypeDockerImage, image)
+				if err != nil {
+					if models.IsErrPackageNotExist(err) {
+						// create a new pkg
+						err = models.AddPackage(models.AddPackageOptions{
+							Repo: repo,
+							Name: image,
+							Type: models.PackageTypeDockerImage,
+						})
+						if err != nil {
+							return err
+						}
+						continue
+					}
+					return err
+				}
+				if pkg != nil {
+					pkgs[event.Target.Repository] = pkg
+				}
+			}
+
+			// update the package's last updated time
+			pkg.UpdatedUnix = timeutil.TimeStamp(event.Timestamp.Unix())
+			err = pkg.UpdateCols("updated_unix")
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/routers/repo/package.go b/routers/repo/package.go
index c99468345a9be..8e7a22ab61f2e 100644
--- a/routers/repo/package.go
+++ b/routers/repo/package.go
@@ -5,24 +5,15 @@
 package repo
 
 import (
-	"encoding/base64"
 	"encoding/json"
-	"fmt"
-	"math/rand"
 	"net/http"
-	"regexp"
-	"sort"
-	"strings"
-	"time"
 
-	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/context"
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/packages/docker"
 	"code.gitea.io/gitea/modules/setting"
-	"code.gitea.io/gitea/modules/timeutil"
 
 	"github.com/docker/distribution/notifications"
-	"github.com/docker/distribution/registry/auth/token"
 )
@@ -46,135 +37,40 @@ func DockerPluginLogin(ctx *context.Context) {
 		return
 	}
 	// Authentication-only request ("docker login"), pass through.
- handleResult(ctx, []authzResult{}) + handleResult(ctx, []docker.AuthzResult{}) return } - scops, err := handleScopes(scope) + scops, err := docker.SplitScopes(scope) if err != nil { ctx.Status(http.StatusBadRequest) return } - results := handlePermission(ctx, scops) - if ctx.Written() { + results, err := docker.PermissionCheck(ctx.User, scops) + if err != nil { + ctx.Error(500, "PermissionCheck") + log.Error("docker.PermissionCheck: %v", err) return } handleResult(ctx, results) } -func handlePermission(ctx *context.Context, scops []authScope) []authzResult { - rs := make([]authzResult, 0, len(scops)) - repos := make(map[string]*models.Repository) - repoPerms := make(map[int64]models.Permission) - - for _, scop := range scops { - hasPush := false - hasPull := false - needCreate := false - for _, act := range scop.Actions { - if act == "push" { - hasPush = true - continue - } - if act == "pull" { - hasPull = true - } - } - if hasPush && ctx.User == nil { - continue - } - splits := strings.SplitN(scop.Name, "/", 3) - var ( - owner string - repoName string - image string - ) - if len(splits) >= 2 { - owner = splits[0] - repoName = splits[1] - if len(splits) == 3 { - image = splits[2] - } else { - image = repoName - } - } - - repo, has := repos[owner+"/"+repoName] - var err error - if !has { - repo, err = models.GetRepositoryByOwnerAndName(owner, repoName) - if err != nil { - if models.IsErrRepoNotExist(err) { - continue - } - ctx.Error(http.StatusInternalServerError, "handlePermission") - log.Error("docker: GetRepositoryByOwnerAndName: %v", err) - return nil - } - repos[owner+"/"+repoName] = repo - } - - _, err = models.GetPackage(repo.ID, models.PackageTypeDockerImage, image) - if err != nil { - if !models.IsErrPackageNotExist(err) { - ctx.Error(http.StatusInternalServerError, "handlePermission") - log.Error("docker: GetPackage: %v", err) - return nil - } - needCreate = true - } - - perm, has := repoPerms[repo.ID] - if !has { - perm, err = models.GetUserRepoPermission(repo, ctx.User) - if err != nil { - ctx.Error(http.StatusInternalServerError, "handlePermission") - log.Error("docker: GetUserRepoPermission: %v", err) - return nil - } - repoPerms[repo.ID] = perm - } - - events := make([]string, 0, 2) - accessMode := models.AccessModeRead - if hasPush { - accessMode = models.AccessModeRead - if needCreate { - accessMode = models.AccessModeAdmin - } - } - if perm.CanAccess(accessMode, models.UnitTypePackages) { - if hasPush { - events = append(events, "push") - } - if hasPull { - events = append(events, "pull") - } - } - - if len(events) == 0 { - continue - } - - rs = append(rs, authzResult{ - scope: scop, - autorizedActions: events, - }) - } - - return rs -} - -var resourceTypeRegex = regexp.MustCompile(`([a-z0-9]+)(\([a-z0-9]+\))?`) - -func handleResult(ctx *context.Context, ares []authzResult) { +func handleResult(ctx *context.Context, ares []docker.AuthzResult) { account := "" if ctx.User != nil { account = ctx.User.Name } - token, err := generateToken(account, ares) + token, err := docker.GenerateToken(docker.GenerateTokenOptions{ + Account: account, + IssuerName: setting.Docker.IssuerName, + AuthzResults: ares, + PublicKey: &setting.Docker.PublicKey, + PrivateKey: &setting.Docker.PrivateKey, + ServiceName: setting.Docker.ServiceName, + Expiration: setting.Docker.Expiration, + }) if err != nil { ctx.Error(http.StatusInternalServerError, "generateToken") log.Error("Failed to generate token: %v", err) @@ -190,130 +86,6 @@ func handleResult(ctx *context.Context, ares []authzResult) { } } 
-type authScope struct { - Type string - Class string - Name string - Actions []string -} - -func handleScopes(scope string) ([]authScope, error) { - scopes := strings.Split(scope, " ") - rs := make([]authScope, 0, len(scopes)) - for _, scopeStr := range scopes { - parts := strings.Split(scopeStr, ":") - var scope authScope - - scopeType, scopeClass, err := parseResourceType(parts[0]) - if err != nil { - return nil, err - } - - switch len(parts) { - case 3: - scope = authScope{ - Type: scopeType, - Class: scopeClass, - Name: parts[1], - Actions: strings.Split(parts[2], ","), - } - case 4: - scope = authScope{ - Type: scopeType, - Class: scopeClass, - Name: parts[1] + ":" + parts[2], - Actions: strings.Split(parts[3], ","), - } - default: - return nil, fmt.Errorf("invalid scope: %q", scopeStr) - } - sort.Strings(scope.Actions) - rs = append(rs, scope) - } - - return rs, nil -} - -func parseResourceType(scope string) (string, string, error) { - parts := resourceTypeRegex.FindStringSubmatch(scope) - if parts == nil { - return "", "", fmt.Errorf("malformed scope request") - } - - switch len(parts) { - case 3: - return parts[1], "", nil - case 4: - return parts[1], parts[3], nil - default: - return "", "", fmt.Errorf("malformed scope request") - } -} - -type authzResult struct { - scope authScope - autorizedActions []string -} - -func generateToken(account string, ares []authzResult) (string, error) { - now := time.Now().Unix() - - // Sign something dummy to find out which algorithm is used. - _, sigAlg, err := setting.Docker.PrivateKey.Sign(strings.NewReader("dummy"), 0) - if err != nil { - return "", fmt.Errorf("failed to sign: %s", err) - } - header := token.Header{ - Type: "JWT", - SigningAlg: sigAlg, - KeyID: setting.Docker.PublicKey.KeyID(), - } - headerJSON, err := json.Marshal(header) - if err != nil { - return "", fmt.Errorf("failed to marshal header: %s", err) - } - - claims := token.ClaimSet{ - Issuer: setting.Docker.IssuerName, - Subject: account, - Audience: setting.Docker.ServiceName, - NotBefore: now - 10, - IssuedAt: now, - Expiration: now + setting.Docker.Expiration, - JWTID: fmt.Sprintf("%d", rand.Int63()), - Access: []*token.ResourceActions{}, - } - for _, a := range ares { - ra := &token.ResourceActions{ - Type: a.scope.Type, - Name: a.scope.Name, - Actions: a.autorizedActions, - } - if ra.Actions == nil { - ra.Actions = []string{} - } - sort.Strings(ra.Actions) - claims.Access = append(claims.Access, ra) - } - claimsJSON, err := json.Marshal(claims) - if err != nil { - return "", fmt.Errorf("failed to marshal claims: %s", err) - } - - payload := fmt.Sprintf("%s%s%s", joseBase64UrlEncode(headerJSON), token.TokenSeparator, joseBase64UrlEncode(claimsJSON)) - - sig, sigAlg2, err := setting.Docker.PrivateKey.Sign(strings.NewReader(payload), 0) - if err != nil || sigAlg2 != sigAlg { - return "", fmt.Errorf("failed to sign token: %s", err) - } - return fmt.Sprintf("%s%s%s", payload, token.TokenSeparator, joseBase64UrlEncode(sig)), nil -} - -// Copy-pasted from libtrust where it is private. 
-func joseBase64UrlEncode(b []byte) string {
-	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
-}
-
 // DockerPluginEvent event service for docker registry
 func DockerPluginEvent(ctx *context.Context) {
 	if !setting.HasDockerPlugin() {
@@ -335,83 +107,17 @@ func DockerPluginEvent(ctx *context.Context) {
 	err := decoder.Decode(data)
 	if err != nil {
 		ctx.Error(http.StatusInternalServerError)
-		log.Error("Failed to unmarshal event: %v", err)
+		log.Error("Failed to unmarshal events: %v", err)
+		return
 	}
 
-	// handle events
-	repos := make(map[string]*models.Repository)
-	pkgs := make(map[string]*models.Package)
-	for _, event := range data.Events {
-		if event.Action == notifications.EventActionPush {
-			var (
-				owner    string
-				repoName string
-				image    string
-			)
-			splits := strings.SplitN(event.Target.Repository, "/", 3)
-			if len(splits) < 2 {
-				continue
-			}
-			owner = splits[0]
-			repoName = splits[1]
-			if len(splits) == 3 {
-				image = splits[2]
-			} else {
-				image = repoName
-			}
-
-			repo, has := repos[owner+"/"+repoName]
-			if !has {
-				repo, err = models.GetRepositoryByOwnerAndName(owner, repoName)
-				if err != nil {
-					if models.IsErrRepoNotExist(err) {
-						continue
-					}
-					ctx.Error(http.StatusInternalServerError, "DockerPluginEvent")
-					log.Error("docker: GetRepositoryByOwnerAndName: %v", err)
-					return
-				}
-				repos[owner+"/"+repoName] = repo
-			}
-
-			pkg, has := pkgs[event.Target.Repository]
-			if !has {
-				pkg, err = models.GetPackage(repo.ID, models.PackageTypeDockerImage, image)
-				if err != nil {
-					if models.IsErrPackageNotExist(err) {
-						// create a new pkg
-						err = models.AddPackage(models.AddPackageOptions{
-							Repo: repo,
-							Name: image,
-							Type: models.PackageTypeDockerImage,
-						})
-						if err != nil {
-							ctx.Error(http.StatusInternalServerError, "DockerPluginEvent")
-							log.Error("docker: AddPackage: %v", err)
-							return
-						}
-						continue
-					}
-					ctx.Error(http.StatusInternalServerError, "DockerPluginEvent")
-					log.Error("docker: GetPackage: %v", err)
-					return
-				}
-				if pkg != nil {
-					pkgs[event.Target.Repository] = pkg
-				}
-			}
-
-			// update update time
-			pkg.UpdatedUnix = timeutil.TimeStamp(event.Timestamp.Unix())
-			err = pkg.UpdateCols("updated_unix")
-			if err != nil {
-				ctx.Error(http.StatusInternalServerError, "DockerPluginEvent")
-				log.Error("docker: UpdateCols(updated_unix): %v", err)
-				return
-			}
-		}
+	// handle events
+	if err := docker.HandleEvents(data); err != nil {
+		ctx.Error(http.StatusInternalServerError)
+		log.Error("Failed to handle events: %v", err)
+		return
 	}
 
 	ctx.JSON(200, map[string]string{
 		"result": "ok",
 	})
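
For reference, the envelope that DockerPluginEvent decodes above follows the registry notification format: an events array whose entries carry an action, a target repository, and a timestamp. The sketch below feeds a hand-written minimal envelope through the same types; a real registry sends many more fields, and the exact JSON shown here is an assumption of a minimal valid payload using only the fields HandleEvents reads.

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/docker/distribution/notifications"
)

func main() {
	// A minimal push-event envelope; field names follow the
	// docker/distribution notification format.
	payload := `{
		"events": [
			{
				"action": "push",
				"timestamp": "2021-03-08T15:04:05Z",
				"target": {"repository": "gitea/myrepo/myimage"}
			}
		]
	}`

	data := new(notifications.Envelope)
	if err := json.NewDecoder(strings.NewReader(payload)).Decode(data); err != nil {
		panic(err)
	}
	for _, event := range data.Events {
		if event.Action == notifications.EventActionPush {
			fmt.Println("pushed:", event.Target.Repository, "at", event.Timestamp)
		}
	}
}
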
From 3548d256b74707bad9dc938e9fc8f993907cbfa3 Mon Sep 17 00:00:00 2001
From: a1012112796 <1012112796@qq.com>
Date: Tue, 9 Mar 2021 20:23:29 +0800
Subject: [PATCH 5/8] init third party api

because when using the token service, it is difficult to use the
docker registry api directly, try to do something to simplify it.

Signed-off-by: a1012112796 <1012112796@qq.com>
---
 custom/conf/app.example.ini    |   4 +-
 modules/convert/package.go     |  11 ++++
 modules/packages/docker/api.go |  56 ++++++++++++++++++
 modules/setting/package.go     |   8 +++
 modules/structs/package.go     |   6 ++
 routers/api/v1/api.go          |   1 +
 routers/api/v1/repo/package.go | 105 ++++++++++++++++++++++++++++++++-
 routers/api/v1/swagger/repo.go |   7 +++
 routers/repo/package.go        |   2 +-
 templates/swagger/v1_json.tmpl |  74 +++++++++++++++++++++++
 10 files changed, 271 insertions(+), 3 deletions(-)
 create mode 100644 modules/packages/docker/api.go

diff --git a/custom/conf/app.example.ini b/custom/conf/app.example.ini
index 7c4f1ef6f6563..75565ba258ac5 100644
--- a/custom/conf/app.example.ini
+++ b/custom/conf/app.example.ini
@@ -1298,4 +1298,6 @@ KEY_FILE = /path/to/server.key
 ; Expiration time for token, should biger than 60 (s)
 EXPIRATION = 60
 ; notify token for notify check
-NOTIFY_TOKEN = xxxxxxxxx
\ No newline at end of file
+NOTIFY_TOKEN = xxxxxxxxx
+; api base path
+API_BASE_PATH = http://127.0.0.1:5000
diff --git a/modules/convert/package.go b/modules/convert/package.go
index 0ae5fab059c0c..e032d00cbb94b 100644
--- a/modules/convert/package.go
+++ b/modules/convert/package.go
@@ -22,3 +22,14 @@ func ToPackage(pkg *models.Package) *api.Package {
 	rs.Private = rs.Repo.Private
 	return rs
 }
+
+// DockerToVersionList converts a list of docker image tags into package versions
+func DockerToVersionList(vs []string) []*api.PackageVersion {
+	rs := make([]*api.PackageVersion, 0, len(vs))
+	for _, v := range vs {
+		rs = append(rs, &api.PackageVersion{
+			Name: v,
+		})
+	}
+	return rs
+}
diff --git a/modules/packages/docker/api.go b/modules/packages/docker/api.go
new file mode 100644
index 0000000000000..3a238d2b3fe9c
--- /dev/null
+++ b/modules/packages/docker/api.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+	"time"
+)
+
+// API is a small client for the docker registry HTTP API
+type API struct {
+	APIBasePath string
+	Token       string
+	TimeOut     time.Duration
+	Ctx         context.Context
+}
+
+// ListImageTags lists the tags of an image
+// GET /v2/<name>/tags/list
+func (a *API) ListImageTags(name string) (*TagsAPIResponse, error) {
+	client := &http.Client{}
+	req, err := http.NewRequest("GET", a.APIBasePath+"/v2/"+name+"/tags/list", nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Authorization", "Bearer "+a.Token)
+
+	ctx, cancel := context.WithTimeout(a.Ctx, a.TimeOut)
+	defer cancel()
+
+	req = req.WithContext(ctx)
+
+	rsp, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer rsp.Body.Close()
+
+	v := new(TagsAPIResponse)
+	decoder := json.NewDecoder(rsp.Body)
+	if err = decoder.Decode(v); err != nil {
+		return nil, err
+	}
+
+	return v, nil
+}
+
+// TagsAPIResponse list tags response
+type TagsAPIResponse struct {
+	Name string   `json:"name"`
+	Tags []string `json:"tags"`
+}
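
A usage sketch for the client added above, assuming a token has already been obtained from docker.GenerateToken; the registry address, token, and image name are placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	"code.gitea.io/gitea/modules/packages/docker"
)

func main() {
	api := &docker.API{
		APIBasePath: "http://127.0.0.1:5000", // same value as API_BASE_PATH
		Token:       "<token from docker.GenerateToken>",
		TimeOut:     30 * time.Second,
		Ctx:         context.Background(),
	}

	// Lists the tags of owner/repo/image via GET /v2/<name>/tags/list.
	rsp, err := api.ListImageTags("gitea/myrepo/myimage")
	if err != nil {
		panic(err)
	}
	fmt.Println(rsp.Name, rsp.Tags)
}
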
diff --git a/modules/setting/package.go b/modules/setting/package.go
index a8c50506a0b3e..c25a263c2033c 100644
--- a/modules/setting/package.go
+++ b/modules/setting/package.go
@@ -7,6 +7,7 @@ package setting
 import (
 	"crypto/tls"
 	"crypto/x509"
+	"strings"
 
 	"code.gitea.io/gitea/modules/log"
@@ -22,6 +23,8 @@ type dockerPluginConfig struct {
 	PublicKey  libtrust.PublicKey
 	PrivateKey libtrust.PrivateKey
+
+	APIBasePath string
@@ -59,6 +62,11 @@ func newPackage() {
 	if len(Docker.NotifyToken) == 0 {
 		log.Fatal("docker_registry_plugin: `NOTIFY_TOKEN` is requested")
 	}
+	Docker.APIBasePath = cfg.Key("API_BASE_PATH").String()
+	if len(Docker.APIBasePath) == 0 {
+		log.Fatal("docker_registry_plugin: `API_BASE_PATH` is required")
+	}
+	Docker.APIBasePath = strings.TrimRight(Docker.APIBasePath, "/")
diff --git a/modules/structs/package.go b/modules/structs/package.go
index bb88a7b2bbb5e..14490e5b3ce02 100644
--- a/modules/structs/package.go
+++ b/modules/structs/package.go
@@ -19,3 +19,9 @@ type Package struct {
 	// swagger:strfmt date-time
 	Updated *time.Time `json:"updated_at"`
 }
+
+// PackageVersion represents a package version
+type PackageVersion struct {
+	Name   string `json:"name"`
+	Detail string `json:"detail"`
+}
diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go
index 701812265ccb3..75dab012d4fc9 100644
--- a/routers/api/v1/api.go
+++ b/routers/api/v1/api.go
@@ -953,6 +953,7 @@ func Routes() *web.Route {
 			m.Get("/languages", reqRepoReader(models.UnitTypeCode), repo.GetLanguages)
 			m.Group("/packages", func() {
 				m.Get("/{type}/{name}", repo.GetPackage)
+				m.Get("/{type}/{name}/versions", repo.ListPackageVersions)
 			}, reqRepoReader(models.UnitTypePackages))
 		}, repoAssignment())
 	})
diff --git a/routers/api/v1/repo/package.go b/routers/api/v1/repo/package.go
index 019c27a55579e..6982324783399 100644
--- a/routers/api/v1/repo/package.go
+++ b/routers/api/v1/repo/package.go
@@ -6,10 +6,13 @@ package repo
 
 import (
 	"net/http"
+	"time"
 
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/context"
 	"code.gitea.io/gitea/modules/convert"
+	"code.gitea.io/gitea/modules/packages/docker"
+	"code.gitea.io/gitea/modules/setting"
 )
@@ -47,7 +50,7 @@
 	//     "$ref": "#/responses/notFound"
 	typ := ctx.Params("type")
 	name := ctx.Params("name")
-	if typ != models.PackageTypeDockerImage.String() {
+	if typ != models.PackageTypeDockerImage.String() || !setting.HasDockerPlugin() {
 		ctx.NotFound()
 		return
 	}
@@ -72,3 +75,103 @@
 
 	ctx.JSON(http.StatusOK, convert.ToPackage(pkg))
 }
+
+// ListPackageVersions lists versions of a package
+func ListPackageVersions(ctx *context.APIContext) {
+	// swagger:operation GET /repos/{owner}/{repo}/packages/{type}/{name}/versions repository repoListPackageVersions
+	// ---
+	// summary: list versions of a package
+	// produces:
+	// - application/json
+	// parameters:
+	// - name: owner
+	//   in: path
+	//   description: owner of the repo
+	//   type: string
+	//   required: true
+	// - name: repo
+	//   in: path
+	//   description: name of the repo
+	//   type: string
+	//   required: true
+	// - name: type
+	//   in: path
+	//   description: type of package
+	//   type: string
+	//   required: true
+	// - name: name
+	//   in: path
+	//   description: name of package
+	//   type: string
+	//   required: true
+	// responses:
+	//   "200":
+	//     "$ref": "#/responses/PackageVersionList"
+	//   "404":
+	//     "$ref": "#/responses/notFound"
+	typ := ctx.Params("type")
+	name := ctx.Params("name")
+	if typ != models.PackageTypeDockerImage.String() || !setting.HasDockerPlugin() {
+		ctx.NotFound()
+		return
+	}
+	pkg, err := models.GetPackage(ctx.Repo.Repository.ID, models.PackageTypeDockerImage, name)
+	if err != nil {
+		if models.IsErrPackageNotExist(err) {
+			ctx.NotFound()
+			return
+		}
+		ctx.InternalServerError(err)
+		return
+	}
+
+	if err := pkg.LoadRepo(); err != nil {
+		ctx.InternalServerError(err)
+		return
+	}
+
+	// generate token
+	perms := []docker.AuthzResult{
+		{
+			Scope: docker.AuthScope{
+				Type:    "repository",
+				Class:   "",
+				Name:    pkg.Repo.OwnerName + "/" + pkg.Repo.Name + "/" + pkg.Name,
+				Actions: []string{"pull"},
+			},
+			AutorizedActions: []string{"pull"},
+		},
+	}
+	account := ""
+	if ctx.User != nil {
+		account = ctx.User.Name
+	}
+	token, err := docker.GenerateToken(docker.GenerateTokenOptions{
+		Account:      account,
+		IssuerName:   setting.Docker.IssuerName,
+		AuthzResults: perms,
+		PublicKey:    &setting.Docker.PublicKey,
+		PrivateKey:   &setting.Docker.PrivateKey,
+		ServiceName:  setting.Docker.ServiceName,
+		Expiration:   setting.Docker.Expiration,
+	})
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "generateToken", err)
+		return
+	}
+
+	api
:= &docker.API{ + APIBasePath: setting.Docker.APIBasePath, + Token: token, + TimeOut: 5 * time.Minute, + Ctx: ctx.Req.Context(), + } + + rs, err := api.ListImageTags(pkg.Repo.OwnerName + "/" + pkg.Repo.Name + "/" + pkg.Name) + if err != nil { + ctx.Error(500, "docker.ListImageTags", err) + return + } + + ctx.JSON(http.StatusOK, convert.DockerToVersionList(rs.Tags)) +} diff --git a/routers/api/v1/swagger/repo.go b/routers/api/v1/swagger/repo.go index 6576866a31c8b..eac6d6b5293c9 100644 --- a/routers/api/v1/swagger/repo.go +++ b/routers/api/v1/swagger/repo.go @@ -330,3 +330,10 @@ type swaggerResponsePackage struct { // in:body Body api.Package `json:"body"` } + +// PackageVersionList +// swagger:response PackageVersionList +type swaggerResponsePackageVersionList struct { + // in:body + Body []api.PackageVersion `json:"body"` +} diff --git a/routers/repo/package.go b/routers/repo/package.go index 8e7a22ab61f2e..5654f1554dd27 100644 --- a/routers/repo/package.go +++ b/routers/repo/package.go @@ -72,7 +72,7 @@ func handleResult(ctx *context.Context, ares []docker.AuthzResult) { Expiration: setting.Docker.Expiration, }) if err != nil { - ctx.Error(http.StatusInternalServerError, "generateToken") + ctx.Error(http.StatusInternalServerError, "") log.Error("Failed to generate token: %v", err) return } diff --git a/templates/swagger/v1_json.tmpl b/templates/swagger/v1_json.tmpl index ae80ff6eb4d63..3275081d653e1 100644 --- a/templates/swagger/v1_json.tmpl +++ b/templates/swagger/v1_json.tmpl @@ -6952,6 +6952,56 @@ } } }, + "/repos/{owner}/{repo}/packages/{type}/{name}/versions": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "repository" + ], + "summary": "list versions of a package", + "operationId": "repoListPackageVersions", + "parameters": [ + { + "type": "string", + "description": "owner of the repo", + "name": "owner", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "name of the repo", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "type of package", + "name": "type", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "name of package", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "$ref": "#/responses/PackageVersionList" + }, + "404": { + "$ref": "#/responses/notFound" + } + } + } + }, "/repos/{owner}/{repo}/pulls": { "get": { "produces": [ @@ -15117,6 +15167,21 @@ }, "x-go-package": "code.gitea.io/gitea/modules/structs" }, + "PackageVersion": { + "description": "PackageVersion represents a package version", + "type": "object", + "properties": { + "detail": { + "type": "string", + "x-go-name": "Detail" + }, + "name": { + "type": "string", + "x-go-name": "Name" + } + }, + "x-go-package": "code.gitea.io/gitea/modules/structs" + }, "PayloadCommit": { "description": "PayloadCommit represents a commit", "type": "object", @@ -16825,6 +16890,15 @@ } } }, + "PackageVersionList": { + "description": "PackageVersionList", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/PackageVersion" + } + } + }, "PublicKey": { "description": "PublicKey", "schema": { From 260fe615c6ca7ee4ac7d87e760e385ce90193a49 Mon Sep 17 00:00:00 2001 From: a1012112796 <1012112796@qq.com> Date: Wed, 10 Mar 2021 10:10:14 +0800 Subject: [PATCH 6/8] Update modules/structs/package.go --- modules/structs/package.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/structs/package.go b/modules/structs/package.go 
index 14490e5b3ce02..cc9d21a26aac6 100644
--- a/modules/structs/package.go
+++ b/modules/structs/package.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Gogs Authors. All rights reserved.
+// Copyright 2021 The Gitea Authors. All rights reserved.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
From 80b7e1d1fd2dd85f26aa3163e9b3ff87bdd79521 Mon Sep 17 00:00:00 2001
From: a1012112796 <1012112796@qq.com>
Date: Thu, 11 Mar 2021 10:07:20 +0800
Subject: [PATCH 7/8] fix some spelling mistakes.

Thanks @wULLSnpAXbWZGYDYyhWTKKspEQoaYxXyhoisqHf
---
 models/migrations/migrations.go | 2 +-
 models/package.go               | 2 +-
 models/unit.go                  | 4 ++--
 options/locale/locale_en-US.ini | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/models/migrations/migrations.go b/models/migrations/migrations.go
index 708e5de9ef784..d3aba8f0d0e6e 100644
--- a/models/migrations/migrations.go
+++ b/models/migrations/migrations.go
@@ -296,7 +296,7 @@ var migrations = []Migration{
 	NewMigration("Add time_id column to Comment", addTimeIDCommentColumn),
 	// v174 -> v175
 	NewMigration("create repo transfer table", addRepoTransfer),
-	// v155 -> v176
+	// v175 -> v176
 	NewMigration("create package table", addPackageTable),
 }
diff --git a/models/package.go b/models/package.go
index af720adc64927..41974b95e6a77 100644
--- a/models/package.go
+++ b/models/package.go
@@ -59,7 +59,7 @@ func AddPackage(option AddPackageOptions) error {
 	return err
 }
 
-// GetPackage get an package
+// GetPackage get a package
 func GetPackage(repoID int64, typ PackageType, name string) (*Package, error) {
 	return getPackage(x, repoID, typ, name)
 }
diff --git a/models/unit.go b/models/unit.go
index 8a5ed55ad55d3..0716119955d55 100644
--- a/models/unit.go
+++ b/models/unit.go
@@ -260,7 +260,7 @@ var (
 		5,
 	}
 
-	UnitPacages = Unit{
+	UnitPackages = Unit{
 		UnitTypePackages,
 		"repo.packages",
 		"/packages",
@@ -278,7 +278,7 @@ var (
 		UnitTypeWiki:         UnitWiki,
 		UnitTypeExternalWiki: UnitExternalWiki,
 		UnitTypeProjects:     UnitProjects,
-		UnitTypePackages:     UnitPacages,
+		UnitTypePackages:     UnitPackages,
 	}
 )
diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini
index 6373d7bd439ff..139bf55a8e1da 100644
--- a/options/locale/locale_en-US.ini
+++ b/options/locale/locale_en-US.ini
@@ -1958,7 +1958,7 @@ topic.count_prompt = You can not select more than 25 topics
 topic.format_prompt = Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 packages = packages
-packages.desc = permission unit for third part packages
+packages.desc = permission unit for third party packages
 
 [org]
 org_name_holder = Organization Name
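
With the routes from patch 5 and the permission unit above in place, the new endpoint can be exercised over the regular REST API. The sketch below is a hedged client-side example: the host, owner, repo, and image names are placeholders, and the package type segment must match models.PackageTypeDockerImage.String(), whose concrete value is defined in models/package.go and not shown in this series; "docker_image" here is an assumption.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// PackageVersion mirrors the API response type added in modules/structs/package.go.
type PackageVersion struct {
	Name   string `json:"name"`
	Detail string `json:"detail"`
}

func main() {
	// Placeholder type string; the real value is models.PackageTypeDockerImage.String().
	const pkgType = "docker_image"

	url := "https://gitea.example.com/api/v1/repos/owner/repo/packages/" + pkgType + "/myimage/versions"
	rsp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer rsp.Body.Close()

	var versions []PackageVersion
	if err := json.NewDecoder(rsp.Body).Decode(&versions); err != nil {
		panic(err)
	}
	for _, v := range versions {
		fmt.Println(v.Name)
	}
}
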
From ff44fca562afe0cf1b807b77c19fc72829bf3856 Mon Sep 17 00:00:00 2001
From: a1012112796 <1012112796@qq.com>
Date: Sat, 17 Apr 2021 22:21:02 +0800
Subject: [PATCH 8/8] apply suggestions from code review

---
 models/package.go                     | 10 ++++++++--
 modules/packages/docker/event_hook.go |  3 +--
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/models/package.go b/models/package.go
index 41974b95e6a77..c838e4574bf16 100644
--- a/models/package.go
+++ b/models/package.go
@@ -85,12 +85,18 @@ func getPackage(e Engine, repoID int64, typ PackageType, name string) (*Package,
 	return pkg, nil
 }
 
-// UpdateCols updates some columns
-func (pkg *Package) UpdateCols(cols ...string) error {
+// updateCols updates some columns
+func (pkg *Package) updateCols(cols ...string) error {
 	_, err := x.ID(pkg.ID).Cols(cols...).Update(pkg)
 	return err
 }
 
+// UpdateLastUpdated updates the package's last updated time
+func (pkg *Package) UpdateLastUpdated(updateTime timeutil.TimeStamp) error {
+	pkg.UpdatedUnix = updateTime
+	return pkg.updateCols("updated_unix")
+}
+
 // LoadRepo load package's repo
 func (pkg *Package) LoadRepo(cols ...string) (err error) {
 	if pkg.Repo != nil {
diff --git a/modules/packages/docker/event_hook.go b/modules/packages/docker/event_hook.go
index 86c23d0f5390c..1869a961cfaa2 100644
--- a/modules/packages/docker/event_hook.go
+++ b/modules/packages/docker/event_hook.go
@@ -68,8 +68,7 @@ func HandleEvents(data *notifications.Envelope) error {
 			}
 
 			// update the package's last updated time
-			pkg.UpdatedUnix = timeutil.TimeStamp(event.Timestamp.Unix())
-			err = pkg.UpdateCols("updated_unix")
+			err = pkg.UpdateLastUpdated(timeutil.TimeStamp(event.Timestamp.Unix()))
 			if err != nil {
 				return err
 			}
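
Finally, the token minted by GenerateToken is a three-segment JWS (header, claims, signature, joined with token.TokenSeparator and base64url-encoded without padding), so its claims can be inspected without any registry involved. The sketch below is a small debugging aid under that assumption; it only decodes the claims, it does not verify the signature, and the token value is a placeholder.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	token := "<output of docker.GenerateToken>"

	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		panic("expected header.claims.signature")
	}
	// joseBase64UrlEncode strips the '=' padding, so decode with RawURLEncoding.
	claimsJSON, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		panic(err)
	}
	var claims map[string]interface{}
	if err := json.Unmarshal(claimsJSON, &claims); err != nil {
		panic(err)
	}
	// e.g. iss, sub, aud, exp, and the granted "access" entries.
	fmt.Printf("%v\n", claims)
}

In the deployed setup, signature verification is the registry's job: it checks the JWS against the certificate configured for the token service, which is why the settings carry a public/private key pair.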