diff --git a/openshift/.bingo/Variables.mk b/openshift/.bingo/Variables.mk
index 274cd7ec4..733f36514 100644
--- a/openshift/.bingo/Variables.mk
+++ b/openshift/.bingo/Variables.mk
@@ -1,4 +1,4 @@
-# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.8. DO NOT EDIT.
+# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.9. DO NOT EDIT.
 # All tools are designed to be build inside $GOBIN.
 BINGO_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
 GOPATH ?= $(shell go env GOPATH)
@@ -7,16 +7,28 @@ GO ?= $(shell which go)
 
 # Below generated variables ensure that every time a tool under each variable is invoked, the correct version
 # will be used; reinstalling only if needed.
-# For example for kustomize variable:
+# For example for go-bindata variable:
 #
 # In your main Makefile (for non array binaries):
 #
 #include .bingo/Variables.mk # Assuming -dir was set to .bingo .
 #
-#command: $(KUSTOMIZE)
-#	@echo "Running kustomize"
-#	@$(KUSTOMIZE)
+#command: $(GO_BINDATA)
+#	@echo "Running go-bindata"
+#	@$(GO_BINDATA)
 #
+GO_BINDATA := $(GOBIN)/go-bindata-v3.1.2+incompatible
+$(GO_BINDATA): $(BINGO_DIR)/go-bindata.mod
+	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
+	@echo "(re)installing $(GOBIN)/go-bindata-v3.1.2+incompatible"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=go-bindata.mod -o=$(GOBIN)/go-bindata-v3.1.2+incompatible "github.com/go-bindata/go-bindata/go-bindata"
+
+GOLANGCI_LINT := $(GOBIN)/golangci-lint-v2.1.6
+$(GOLANGCI_LINT): $(BINGO_DIR)/golangci-lint.mod
+	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
+	@echo "(re)installing $(GOBIN)/golangci-lint-v2.1.6"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v2.1.6 "github.com/golangci/golangci-lint/v2/cmd/golangci-lint"
+
 KUSTOMIZE := $(GOBIN)/kustomize-v5.0.1
 $(KUSTOMIZE): $(BINGO_DIR)/kustomize.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
diff --git a/openshift/.bingo/go-bindata.mod b/openshift/.bingo/go-bindata.mod
new file mode 100644
index 000000000..387c94b99
--- /dev/null
+++ b/openshift/.bingo/go-bindata.mod
@@ -0,0 +1,5 @@
+module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
+
+go 1.24.4
+
+require github.com/go-bindata/go-bindata v3.1.2+incompatible // go-bindata
diff --git a/openshift/.bingo/go-bindata.sum b/openshift/.bingo/go-bindata.sum
new file mode 100644
index 000000000..402dde703
--- /dev/null
+++ b/openshift/.bingo/go-bindata.sum
@@ -0,0 +1,2 @@
+github.com/go-bindata/go-bindata v3.1.2+incompatible h1:5vjJMVhowQdPzjE1LdxyFF7YFTXg5IgGVW4gBr5IbvE=
+github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
diff --git a/openshift/.bingo/golangci-lint.mod b/openshift/.bingo/golangci-lint.mod
new file mode 100644
index 000000000..9906dacd8
--- /dev/null
+++ b/openshift/.bingo/golangci-lint.mod
@@ -0,0 +1,5 @@
+module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
+
+go 1.24.4
+
+require github.com/golangci/golangci-lint/v2 v2.1.6 // cmd/golangci-lint
diff --git a/openshift/.bingo/golangci-lint.sum b/openshift/.bingo/golangci-lint.sum
new file mode 100644
index 000000000..3ad12260d
--- /dev/null
+++ b/openshift/.bingo/golangci-lint.sum
@@ -0,0 +1,952 @@
+4d63.com/gocheckcompilerdirectives v1.3.0 h1:Ew5y5CtcAAQeTVKUVFrE7EwHMrTO6BggtEj8BZSjZ3A=
+4d63.com/gocheckcompilerdirectives v1.3.0/go.mod h1:ofsJ4zx2QAuIP/NO/NAh1ig6R1Fb18/GI7RVMwz7kAY=
+4d63.com/gochecknoglobals v0.2.2 h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU=
+4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/4meepo/tagalign v1.4.2 h1:0hcLHPGMjDyM1gHG58cS73aQF8J4TdVR96TZViorO9E= +github.com/4meepo/tagalign v1.4.2/go.mod h1:+p4aMyFM+ra7nb41CnFG6aSDXqRxU/w1VQqScKqDARI= +github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE= +github.com/Abirdcfly/dupword v0.1.3/go.mod h1:8VbB2t7e10KRNdwTVoxdBaxla6avbhGzb8sCTygUMhw= +github.com/Antonboom/errname v1.1.0 h1:A+ucvdpMwlo/myWrkHEUEBWc/xuXdud23S8tmTb/oAE= +github.com/Antonboom/errname v1.1.0/go.mod h1:O1NMrzgUcVBGIfi3xlVuvX8Q/VP/73sseCaAppfjqZw= +github.com/Antonboom/nilnil v1.1.0 h1:jGxJxjgYS3VUUtOTNk8Z1icwT5ESpLH/426fjmQG+ng= +github.com/Antonboom/nilnil v1.1.0/go.mod h1:b7sAlogQjFa1wV8jUW3o4PMzDVFLbTux+xnQdvzdcIE= +github.com/Antonboom/testifylint v1.6.1 h1:6ZSytkFWatT8mwZlmRCHkWz1gPi+q6UBSbieji2Gj/o= +github.com/Antonboom/testifylint v1.6.1/go.mod h1:k+nEkathI2NFjKO6HvwmSrbzUcQ6FAnbZV+ZRrnXPLI= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 h1:Sz1JIXEcSfhz7fUi7xHnhpIE0thVASYjvosApmHuD2k= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1/go.mod h1:n/LSCXNuIYqVfBlVXyHfMQkZDdp1/mmxfSjADd3z1Zg= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo= +github.com/alecthomas/chroma/v2 v2.17.2 h1:Rm81SCZ2mPoH+Q8ZCc/9YvzPUN/E7HgPiPJD8SLV6GI= +github.com/alecthomas/chroma/v2 v2.17.2/go.mod h1:RVX6AvYm4VfYe/zsk7mjHueLDZor3aWCNE14TFlepBk= +github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= +github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexkohler/nakedret/v2 v2.0.6 h1:ME3Qef1/KIKr3kWX3nti3hhgNxw6aqN5pZmQiFSsuzQ= +github.com/alexkohler/nakedret/v2 v2.0.6/go.mod h1:l3RKju/IzOMQHmsEvXwkqMDzHHvurNQfAgE1eVmT40Q= +github.com/alexkohler/prealloc 
v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/alingse/nilnesserr v0.2.0 h1:raLem5KG7EFVb4UIDAXgrv3N2JIaffeKNtcEXkEWd/w= +github.com/alingse/nilnesserr v0.2.0/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg= +github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= +github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= +github.com/ashanbrown/makezero v1.2.0 h1:/2Lp1bypdmK9wDIq7uWBlDF1iMUpIIS4A+pF6C9IEUU= +github.com/ashanbrown/makezero v1.2.0/go.mod h1:dxlPhHbDMC6N6xICzFBSK+4njQDdK8euNO0qjQMtGY4= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w= +github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo= +github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bombsimon/wsl/v4 v4.7.0 h1:1Ilm9JBPRczjyUs6hvOPKvd7VL1Q++PL8M0SXBDf+jQ= +github.com/bombsimon/wsl/v4 v4.7.0/go.mod h1:uV/+6BkffuzSAVYD+yGyld1AChO7/EuLrCF/8xTiapg= +github.com/breml/bidichk v0.3.3 h1:WSM67ztRusf1sMoqH6/c4OBCUlRVTKq+CbSeo0R17sE= +github.com/breml/bidichk v0.3.3/go.mod h1:ISbsut8OnjB367j5NseXEGGgO/th206dVa427kR8YTE= +github.com/breml/errchkjson v0.4.1 h1:keFSS8D7A2T0haP9kzZTi7o26r7kE3vymjZNeNDRDwg= +github.com/breml/errchkjson v0.4.1/go.mod h1:a23OvR6Qvcl7DG/Z4o0el6BRAjKnaReoPQFciAl9U3s= +github.com/butuzov/ireturn v0.4.0 h1:+s76bF/PfeKEdbG8b54aCocxXmi0wvYdOVsWxVO7n8E= +github.com/butuzov/ireturn v0.4.0/go.mod h1:ghI0FrCmap8pDWZwfPisFD1vEc56VKH4NpQUxDHta70= +github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= +github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= +github.com/catenacyber/perfsprint v0.9.1 h1:5LlTp4RwTooQjJCvGEFV6XksZvWE7wCOUvjD2z0vls0= +github.com/catenacyber/perfsprint v0.9.1/go.mod h1:q//VWC2fWbcdSLEY1R3l8n0zQCDPdE4IjZwyY1HMunM= +github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg= +github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= +github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= +github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= +github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= +github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= +github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= +github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= +github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/ckaznocha/intrange v0.3.1 h1:j1onQyXvHUsPWujDH6WIjhyH26gkRt/txNlV7LspvJs= +github.com/ckaznocha/intrange v0.3.1/go.mod h1:QVepyz1AkUoFQkpEqksSYpNpUo3c5W7nWh/s6SHIJJk= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs= +github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= +github.com/daixiang0/gci v0.13.6 h1:RKuEOSkGpSadkGbvZ6hJ4ddItT3cVZ9Vn9Rybk6xjl8= +github.com/daixiang0/gci v0.13.6/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/dave/dst v0.27.3 h1:P1HPoMza3cMEquVf9kKy8yXsFirry4zEnWOdYPOoIzY= +github.com/dave/dst v0.27.3/go.mod h1:jHh6EOibnHgcUW3WjKHisiooEkYwqpHLBSX1iOBhEyc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= +github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/dlclark/regexp2 v1.11.5 
h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= +github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= +github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/firefart/nonamedreturns v1.0.6 h1:vmiBcKV/3EqKY3ZiPxCINmpS431OcE1S47AQUwhrg8E= +github.com/firefart/nonamedreturns v1.0.6/go.mod h1:R8NisJnSIpvPWheCq0mNRXJok6D8h7fagJTF8EMEwCo= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/ghostiam/protogetter v0.3.15 h1:1KF5sXel0HE48zh1/vn0Loiw25A9ApyseLzQuif1mLY= +github.com/ghostiam/protogetter v0.3.15/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= +github.com/go-critic/go-critic v0.13.0 h1:kJzM7wzltQasSUXtYyTl6UaPVySO6GkaR1thFnJ6afY= +github.com/go-critic/go-critic v0.13.0/go.mod h1:M/YeuJ3vOCQDnP2SU+ZhjgRzwzcBW87JqLpMJLrZDLI= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= +github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal 
v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw= +github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY= +github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= +github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod 
h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6OmeSBYQJNSif4O11+bmWEz+C7FYw= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32/go.mod h1:NUw9Zr2Sy7+HxzdjIULge71wI6yEg1lWQr7Evcu8K0E= +github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU= +github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0aiOTsLKO2aZQAPT4nlQCsimGcSGE= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= +github.com/golangci/golangci-lint/v2 v2.1.6 h1:LXqShFfAGM5BDzEOWD2SL1IzJAgUOqES/HRBsfKjI+w= +github.com/golangci/golangci-lint/v2 v2.1.6/go.mod h1:EPj+fgv4TeeBq3TcqaKZb3vkiV5dP4hHHKhXhEhzci8= +github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95 h1:AkK+w9FZBXlU/xUmBtSJN1+tAI4FIvy5WtnUnY8e4p8= +github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95/go.mod h1:k9mmcyWKSTMcPPvQUCfRWWQ9VHJ1U9Dc0R7kaXAgtnQ= +github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs= +github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= +github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c= +github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc= +github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2s= +github.com/golangci/revgrep v0.8.0/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= +github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e h1:gD6P7NEo7Eqtt0ssnqSJNNndxe69DOQ24A5h7+i3KpM= +github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e/go.mod h1:h+wZwLjUTJnm/P2rwlbJdRPZXOzaT36/FwnPnY2inzc= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s= +github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/comment v1.5.0 h1:X82FLl+TswsUMpMh17srGRuKaaXprTaytmEpgnKIDu8= 
+github.com/gostaticanalysis/comment v1.5.0/go.mod h1:V6eb3gpCv9GNVqb6amXzEUX3jXLVK/AdA+IrAMSqvEc= +github.com/gostaticanalysis/forcetypeassert v0.2.0 h1:uSnWrrUEYDr86OCxWa4/Tp2jeYDlogZiZHzGkWFefTk= +github.com/gostaticanalysis/forcetypeassert v0.2.0/go.mod h1:M5iPavzE9pPqWyeiVXSFghQjljW1+l/Uke3PXHS6ILY= +github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jgautheron/goconst v1.8.1 h1:PPqCYp3K/xlOj5JmIe6O1Mj6r1DbkdbLtR3AJuZo414= +github.com/jgautheron/goconst v1.8.1/go.mod h1:A0oxgBCHy55NQn6sYpO7UdnA9p+h7cPtoOZUmvNIako= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jjti/go-spancheck v0.6.4 h1:Tl7gQpYf4/TMU7AT84MN83/6PutY21Nb9fuQjFTpRRc= +github.com/jjti/go-spancheck v0.6.4/go.mod h1:yAEYdKJ2lRkDA8g7X+oKUHXOWVAXSBJRv04OhF+QUjk= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= 
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= +github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= +github.com/karamaru-alpha/copyloopvar v1.2.1 h1:wmZaZYIjnJ0b5UoKDjUHrikcV0zuPyyxI4SVplLd2CI= +github.com/karamaru-alpha/copyloopvar v1.2.1/go.mod h1:nFmMlFNlClC2BPvNaHMdkirmTJxVCY0lhxBtlfOypMM= +github.com/kisielk/errcheck v1.9.0 h1:9xt1zI9EBfcYBvdU1nVrzMzzUPUtPKs9bVSIM3TAb3M= +github.com/kisielk/errcheck v1.9.0/go.mod h1:kQxWMMVZgIkDq7U8xtG/n2juOjbLgZtedi0D+/VL/i8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.6 h1:7HIyRcnyzxL9Lz06NGhiKvenXq7Zw6Q0UQu/ttjfJCE= +github.com/kkHAIKE/contextcheck v1.1.6/go.mod h1:3dDbMRNBFaq8HFXWC1JyvDSPm43CmE6IuHam8Wr0rkg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.14 h1:wAkMoMeGX/kGfhQBPODT/BL8XhK23ol/nuQ3SwFaUw8= +github.com/kunwardeep/paralleltest v1.0.14/go.mod h1:di4moFqtfz3ToSKxhNjhOZL+696QtJGCFe132CbBLGk= +github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4= +github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI= +github.com/ldez/exptostd v0.4.3 h1:Ag1aGiq2epGePuRJhez2mzOpZ8sI9Gimcb4Sb3+pk9Y= +github.com/ldez/exptostd v0.4.3/go.mod h1:iZBRYaUmcW5jwCR3KROEZ1KivQQp6PHXbDPk9hqJKCQ= +github.com/ldez/gomoddirectives v0.6.1 h1:Z+PxGAY+217f/bSGjNZr/b2KTXcyYLgiWI6geMBN2Qc= +github.com/ldez/gomoddirectives v0.6.1/go.mod h1:cVBiu3AHR9V31em9u2kwfMKD43ayN5/XDgr+cdaFaKs= +github.com/ldez/grignotin v0.9.0 h1:MgOEmjZIVNn6p5wPaGp/0OKWyvq42KnzAt/DAb8O4Ow= +github.com/ldez/grignotin v0.9.0/go.mod h1:uaVTr0SoZ1KBii33c47O1M8Jp3OP3YDwhZCmzT9GHEk= +github.com/ldez/tagliatelle v0.7.1 h1:bTgKjjc2sQcsgPiT902+aadvMjCeMHrY7ly2XKFORIk= +github.com/ldez/tagliatelle v0.7.1/go.mod h1:3zjxUpsNB2aEZScWiZTHrAXOl1x25t3cRmzfK1mlo2I= +github.com/ldez/usetesting v0.4.3 h1:pJpN0x3fMupdTf/IapYjnkhiY1nSTN+pox1/GyBRw3k= +github.com/ldez/usetesting v0.4.3/go.mod h1:eEs46T3PpQ+9RgN9VjpY6qWdiw2/QmfiDeWmdZdrjIQ= +github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= +github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod 
h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/macabu/inamedparam v0.2.0 h1:VyPYpOc10nkhI2qeNUdh3Zket4fcZjEWe35poddBCpE= +github.com/macabu/inamedparam v0.2.0/go.mod h1:+Pee9/YfGe5LJ62pYXqB89lJ+0k5bsR8Wgz/C0Zlq3U= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/manuelarte/funcorder v0.2.1 h1:7QJsw3qhljoZ5rH0xapIvjw31EcQeFbF31/7kQ/xS34= +github.com/manuelarte/funcorder v0.2.1/go.mod h1:BQQ0yW57+PF9ZpjpeJDKOffEsQbxDFKW8F8zSMe/Zd0= +github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= +github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= +github.com/matoous/godox v1.1.0 h1:W5mqwbyWrwZv6OQ5Z1a/DHGMOvXYCBP3+Ht7KMoJhq4= +github.com/matoous/godox v1.1.0/go.mod h1:jgE/3fUXiTurkdHOLT5WEkThTSuE7yxHv5iWPa80afs= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgechev/revive v1.9.0 h1:8LaA62XIKrb8lM6VsBSQ92slt/o92z5+hTw3CmrvSrM= +github.com/mgechev/revive v1.9.0/go.mod h1:LAPq3+MgOf7GcL5PlWIkHb0PT7XH4NuC2LdWymhb9Mo= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= +github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= +github.com/muesli/termenv 
v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= +github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nunnatsa/ginkgolinter v0.19.1 h1:mjwbOlDQxZi9Cal+KfbEJTCz327OLNfwNvoZ70NJ+c4= +github.com/nunnatsa/ginkgolinter v0.19.1/go.mod h1:jkQ3naZDmxaZMXPWaS9rblH+i+GWXQCaS/JFIWcOH2s= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v1.8.0 h1:DL4RestQqRLr8U4LygLw8g2DX6RN1eBJOpa2mzsrl1Q= +github.com/polyfloyd/go-errorlint v1.8.0/go.mod h1:G2W0Q5roxbLCt0ZQbdoxQxXktTjwNyDbEaj3n7jvl4s= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= 
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/quasilyte/go-ruleguard v0.4.4 h1:53DncefIeLX3qEpjzlS1lyUmQoUEeOWPFWqaTJq9eAQ= +github.com/quasilyte/go-ruleguard v0.4.4/go.mod h1:Vl05zJ538vcEEwu16V/Hdu7IYZWyKSwIy4c88Ro1kRE= +github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= +github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI= +github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/russross/blackfriday/v2 
v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.4.1 h1:eWC8eUMNZ/wM/PWuZBv7JxxqT5fiIKSIyTvjb7Elr+g= +github.com/ryancurrah/gomodguard v1.4.1/go.mod h1:qnMJwV1hX9m+YJseXEBhd2s90+1Xn6x9dLz11ualI1I= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= +github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= +github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.28.0 h1:jZnudE2zKCtYlGzLVreNp5pmCdOxXUzwsMDBkR21cyQ= +github.com/sashamelentyev/usestdlibvars v1.28.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/securego/gosec/v2 v2.22.3 h1:mRrCNmRF2NgZp4RJ8oJ6yPJ7G4x6OCiAXHd8x4trLRc= +github.com/securego/gosec/v2 v2.22.3/go.mod h1:42M9Xs0v1WseinaB/BmNGO8AVqG8vRfhC2686ACY48k= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= +github.com/sonatard/noctx v0.1.0 h1:JjqOc2WN16ISWAjAk8M5ej0RfExEXtkEyExl2hLW+OM= +github.com/sonatard/noctx v0.1.0/go.mod h1:0RvBxqY8D4j9cTTTWE8ylt2vqj2EPI8fHmrxHdsaZ2c= +github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= +github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4= +github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/tdakkota/asciicheck v0.4.1 h1:bm0tbcmi0jezRA2b5kg4ozmMuGAFotKI3RZfrhfovg8= +github.com/tdakkota/asciicheck v0.4.1/go.mod h1:0k7M3rCfRXb0Z6bwgvkEIMleKH3kXNz9UqJ9Xuqopr8= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.5.1 h1:PZnjCol4+FqaEzvZg5+O8IY2P3hfY9JzRBNPv1pEDS4= +github.com/tetafro/godot v1.5.1/go.mod h1:cCdPtEndkmqqrhiCfkmxDodMQJ/f3L1BCNskCUZdTwk= +github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 h1:9LPGD+jzxMlnk5r6+hJnar67cgpDIz/iyD+rfl5r2Vk= +github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= +github.com/timonwong/loggercheck v0.11.0 h1:jdaMpYBl+Uq9mWPXv1r8jc5fC3gyXx4/WGwTnnNKn4M= +github.com/timonwong/loggercheck v0.11.0/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= 
+github.com/tomarrell/wrapcheck/v2 v2.11.0 h1:BJSt36snX9+4WTIXeJ7nvHBQBcm1h2SjQMSlmQ6aFSU= +github.com/tomarrell/wrapcheck/v2 v2.11.0/go.mod h1:wFL9pDWDAbXhhPZZt+nG8Fu+h29TtnZ2MW6Lx4BRXIU= +github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= +github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= +github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= +github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= +github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= +github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= +github.com/uudashr/iface v1.3.1 h1:bA51vmVx1UIhiIsQFSNq6GZ6VPTk3WNMZgRiCe9R29U= +github.com/uudashr/iface v1.3.1/go.mod h1:4QvspiRd3JLPAEXBQ9AiZpLbJlrWWgRChOKDJEuQTdg= +github.com/xen0n/gosmopolitan v1.3.0 h1:zAZI1zefvo7gcpbCOrPSHJZJYA9ZgLfJqtKzZ5pHqQM= +github.com/xen0n/gosmopolitan v1.3.0/go.mod h1:rckfr5T6o4lBtM1ga7mLGKZmLxswUoH1zxHgNXOsEt4= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs= +github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4= +github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw= +github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= +gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= +go-simpler.org/musttag v0.13.1 h1:lw2sJyu7S1X8lc8zWUAdH42y+afdcCnHhWpnkWvd6vU= +go-simpler.org/musttag v0.13.1/go.mod h1:8r450ehpMLQgvpb6sg+hV5Ur47eH6olp/3yEanfG97k= +go-simpler.org/sloglint v0.11.0 h1:JlR1X4jkbeaffiyjLtymeqmGDKBDO1ikC6rjiuFAOco= +go-simpler.org/sloglint v0.11.0/go.mod h1:CFDO8R1i77dlciGfPEPvYke2ZMx4eyGiEIWkyeW2Pvw= +go.augendre.info/fatcontext v0.8.0 h1:2dfk6CQbDGeu1YocF59Za5Pia7ULeAM6friJ3LP7lmk= +go.augendre.info/fatcontext v0.8.0/go.mod h1:oVJfMgwngMsHO+KB2MdgzcO+RvtNdiCEOlWvSFtax/s= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io 
v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac h1:TSSpLIG4v+p0rPv1pNOQtl1I8knsO4S9trOxNMOLVP4= +golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= 
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod 
h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod 
h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= +honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= +mvdan.cc/gofumpt v0.8.0 h1:nZUCeC2ViFaerTcYKstMmfysj6uhQrA2vJe+2vwGU6k= +mvdan.cc/gofumpt v0.8.0/go.mod h1:vEYnSzyGPmjvFkqJWtXkh79UwPWP9/HMxQdGEXZHjpg= +mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4 h1:WjUu4yQoT5BHT1w8Zu56SP8367OuBV5jvo+4Ulppyf8= +mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4/go.mod h1:rthT7OuvRbaGcd5ginj6dA2oLE7YNlta9qhBNNdCaLE= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/openshift/.bingo/variables.env b/openshift/.bingo/variables.env index caf5e6625..2a61fc383 100644 --- a/openshift/.bingo/variables.env +++ b/openshift/.bingo/variables.env @@ -1,4 +1,4 @@ -# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.8. DO NOT EDIT. +# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.9. DO NOT EDIT. # All tools are designed to be build inside $GOBIN. # Those variables will work only until 'bingo get' was invoked, or if tools were installed via Makefile's Variables.mk. 
GOBIN=${GOBIN:=$(go env GOBIN)} @@ -8,6 +8,10 @@ if [ -z "$GOBIN" ]; then fi +GO_BINDATA="${GOBIN}/go-bindata-v3.1.2+incompatible" + +GOLANGCI_LINT="${GOBIN}/golangci-lint-v2.1.6" + KUSTOMIZE="${GOBIN}/kustomize-v5.0.1" YQ="${GOBIN}/yq-v4.34.2" diff --git a/openshift/tests-extension/.openshift-tests-extension/openshift_payload_olmv1.json b/openshift/tests-extension/.openshift-tests-extension/openshift_payload_olmv1.json index d0a2125c9..d838eda39 100644 --- a/openshift/tests-extension/.openshift-tests-extension/openshift_payload_olmv1.json +++ b/openshift/tests-extension/.openshift-tests-extension/openshift_payload_olmv1.json @@ -1,6 +1,6 @@ [ { - "name": "[sig-olmv1] OLMv1 should pass a trivial sanity check", + "name": "[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 operator installation should block cluster upgrades if an incompatible operator is installed", "labels": {}, "resources": { "isolation": {} @@ -10,7 +10,7 @@ "environmentSelector": {} }, { - "name": "[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 CRDs should be installed", + "name": "[sig-olmv1] OLMv1 should pass a trivial sanity check", "labels": {}, "resources": { "isolation": {} @@ -20,7 +20,7 @@ "environmentSelector": {} }, { - "name": "[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 operator installation should install a cluster extension", + "name": "[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 CRDs should be installed", "labels": {}, "resources": { "isolation": {} @@ -30,7 +30,7 @@ "environmentSelector": {} }, { - "name": "[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 operator installation should fail to install a non-existing cluster extension", + "name": "[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 operator installation should install a cluster extension", "labels": {}, "resources": { "isolation": {} @@ -40,7 +40,7 @@ "environmentSelector": {} }, { - "name": "[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 operator installation should block cluster upgrades if an incompatible operator is installed", + "name": "[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 operator installation should fail to install a non-existing cluster extension", "labels": {}, "resources": { "isolation": {} diff --git a/openshift/tests-extension/Makefile b/openshift/tests-extension/Makefile index 8045b802d..05693ee40 100644 --- a/openshift/tests-extension/Makefile +++ b/openshift/tests-extension/Makefile @@ -1,7 +1,8 @@ # Get the directory where this Makefile is, so we can use it below for including # Include the same Bingo variables used by the project DIR := $(strip $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))) -include $(DIR)/../../.bingo/Variables.mk +# Use openshift's Bingo variables +include $(DIR)/../.bingo/Variables.mk # Definitions for the extended tests @@ -63,11 +64,27 @@ lint: $(GOLANGCI_LINT) #HELP Run golangci linter. fix-lint: $(GOLANGCI_LINT) #HELP Fix lint issues $(GOLANGCI_LINT) run --fix +# Bindata generation +.PHONY: bindata +bindata: $(GO_BINDATA) + +bindata: pkg/bindata/operator/operator.go +pkg/bindata/operator/operator.go: $(shell find testdata/operator -type f) + mkdir -p $(@D) + $(GO_BINDATA) -pkg operator -o $@ -prefix "testdata/operator" testdata/operator/... + go fmt ./$(@D)/... + +bindata: pkg/bindata/catalog/catalog.go +pkg/bindata/catalog/catalog.go: $(shell find testdata/catalog -type f) + mkdir -p $(@D) + $(GO_BINDATA) -pkg catalog -o $@ -prefix "testdata/catalog" testdata/catalog/... + go fmt ./$(@D)/... 
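A minimal sketch of how test code could consume the packages these bindata rules regenerate, assuming the module path declared in go.mod below; the package and test names here are illustrative, not taken from the tests added in this change:

package e2e_test // hypothetical package name

import (
	"testing"

	catalog "github/operator-framework-operator-controller/openshift/tests-extension/pkg/bindata/catalog"
)

// TestCatalogAssets is an illustrative example of reading the embedded files.
func TestCatalogAssets(t *testing.T) {
	// MustAsset panics when the name is unknown, which is acceptable in a test.
	index := catalog.MustAsset("configs/index.yaml")
	if len(index) == 0 {
		t.Fatal("embedded configs/index.yaml is empty")
	}
	// AssetNames lists every file captured from testdata/catalog at generation time.
	for _, name := range catalog.AssetNames() {
		t.Logf("embedded asset: %s", name)
	}
}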
+ # GO_COMPLIANCE_POLICY="exempt_all" must only be used for test related binaries. # It prevents various FIPS compliance policies from being applied to this compilation. # Do not set globally. .PHONY: build -build: #HELP Build the extended tests binary +build: bindata #HELP Build the extended tests binary @mkdir -p $(TOOLS_BIN_DIR) GO_COMPLIANCE_POLICY="exempt_all" go build -ldflags "$(LDFLAGS)" -mod=vendor -o $(TOOLS_BIN_DIR)/olmv1-tests-ext ./cmd/... diff --git a/openshift/tests-extension/go.mod b/openshift/tests-extension/go.mod index d99a435f7..52902d372 100644 --- a/openshift/tests-extension/go.mod +++ b/openshift/tests-extension/go.mod @@ -3,10 +3,11 @@ module github/operator-framework-operator-controller/openshift/tests-extension go 1.24.3 require ( + github.com/blang/semver/v4 v4.0.0 github.com/onsi/ginkgo/v2 v2.23.3 github.com/onsi/gomega v1.37.0 github.com/openshift-eng/openshift-tests-extension v0.0.0-20250722101414-8083129ab8f9 - github.com/openshift/api v0.0.0-20250718204806-3333746edfbf + github.com/openshift/api v0.0.0-20250808142411-c974eeafe3f1 github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee github.com/operator-framework/operator-controller v1.3.0 github.com/spf13/cobra v1.9.1 @@ -14,6 +15,7 @@ require ( k8s.io/apiextensions-apiserver v0.33.2 k8s.io/apimachinery v0.33.3 k8s.io/client-go v0.33.2 + k8s.io/utils v0.0.0-20241210054802-24370beab758 sigs.k8s.io/controller-runtime v0.21.0 ) @@ -62,7 +64,6 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect - k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect diff --git a/openshift/tests-extension/go.sum b/openshift/tests-extension/go.sum index 9204d41c1..c36050505 100644 --- a/openshift/tests-extension/go.sum +++ b/openshift/tests-extension/go.sum @@ -70,8 +70,8 @@ github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/openshift-eng/openshift-tests-extension v0.0.0-20250722101414-8083129ab8f9 h1:4ZeSM80DVCb5WWB3Q/fyCI9jYXAl9bfrGnFvFONqzN4= github.com/openshift-eng/openshift-tests-extension v0.0.0-20250722101414-8083129ab8f9/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= -github.com/openshift/api v0.0.0-20250718204806-3333746edfbf h1:NFtgSlz4oqI8CHOenkBK5Xq+hLJhe3c9fkyxMSysgIA= -github.com/openshift/api v0.0.0-20250718204806-3333746edfbf/go.mod h1:SPLf21TYPipzCO67BURkCfK6dcIIxx0oNRVWaOyRcXM= +github.com/openshift/api v0.0.0-20250808142411-c974eeafe3f1 h1:VElrUno5AG2Zl6M+2pYPiXXPfNGpeb+0v95sl8AAczw= +github.com/openshift/api v0.0.0-20250808142411-c974eeafe3f1/go.mod h1:SPLf21TYPipzCO67BURkCfK6dcIIxx0oNRVWaOyRcXM= github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee h1:tOtrrxfDEW8hK3eEsHqxsXurq/D6LcINGfprkQC3hqY= github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee/go.mod h1:zhRiYyNMk89llof2qEuGPWPD+joQPhCRUc2IK0SB510= github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= diff --git a/openshift/tests-extension/pkg/bindata/catalog/catalog.go b/openshift/tests-extension/pkg/bindata/catalog/catalog.go new file mode 100644 index 
000000000..d7f1f39aa --- /dev/null +++ b/openshift/tests-extension/pkg/bindata/catalog/catalog.go @@ -0,0 +1,294 @@ +// Code generated for package catalog by go-bindata DO NOT EDIT. (@generated) +// sources: +// testdata/catalog/Dockerfile +// testdata/catalog/configs/.indexignore +// testdata/catalog/configs/index.yaml +package catalog + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +// Name return file name +func (fi bindataFileInfo) Name() string { + return fi.name +} + +// Size return file size +func (fi bindataFileInfo) Size() int64 { + return fi.size +} + +// Mode return file mode +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} + +// Mode return file modify time +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} + +// IsDir return file whether a directory +func (fi bindataFileInfo) IsDir() bool { + return fi.mode&os.ModeDir != 0 +} + +// Sys return file is sys mode +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _dockerfile = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0b\xf2\xf7\x55\x28\x4e\x2e\x4a\x2c\x49\xce\xe0\x72\x74\x71\x51\x48\xce\xcf\x4b\xcb\x4c\x2f\x56\xd0\x87\x32\xb8\x7c\x1c\x9d\x5c\x7d\x14\xf2\x0b\x52\x8b\x12\x4b\xf2\x8b\x8a\xf5\x60\xac\xb4\xa2\xc4\xdc\xd4\xf2\xfc\xa2\x6c\xbd\xcc\x7c\xbd\xcc\xbc\x94\xd4\x0a\x3d\xa8\x16\xbd\x32\x43\x5b\xb8\x76\x40\x00\x00\x00\xff\xff\x47\x5b\xe3\x6f\x61\x00\x00\x00") + +func dockerfileBytes() ([]byte, error) { + return bindataRead( + _dockerfile, + "Dockerfile", + ) +} + +func dockerfile() (*asset, error) { + bytes, err := dockerfileBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "Dockerfile", size: 97, mode: os.FileMode(420), modTime: time.Unix(1755024325, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _configsIndexignore = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd3\xd3\xe2\x02\x04\x00\x00\xff\xff\xcd\x49\xc4\xdc\x04\x00\x00\x00") + +func configsIndexignoreBytes() ([]byte, error) { + return bindataRead( + _configsIndexignore, + "configs/.indexignore", + ) +} + +func configsIndexignore() (*asset, error) { + bytes, err := configsIndexignoreBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "configs/.indexignore", size: 4, mode: os.FileMode(420), modTime: time.Unix(1755024325, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _configsIndexYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x91\x31\x4f\xc3\x30\x10\x85\xf7\xfc\x8a\x93\xe7\x24\x0d\x03\x4b\xb6\x02\x19\x90\x20\x45\xa4\x74\x41\x0c\x26\x1c\x8d\x85\xed\x58\xb6\x6b\xa8\xaa\xfc\x77\x64\x27\x55\x69\x71\x61\x49\x2c\x7d\x77\xef\xee\xde\xdb\x25\x00\xc4\xb4\x1d\x0a\x4a\x4a\x20\x3d\x17\xb9\xa2\xed\x07\x5d\x23\x49\x3d\x92\x54\xa0\x07\xcb\xaa\x59\x66\x57\x4f\xf5\xcd\x5d\x45\x92\x21\x89\xb4\xbd\x6e\xe4\x1b\x3f\xdf\x95\xbb\x22\x2f\xf2\x8b\x91\xef\x47\x9c\x08\x07\xc6\xc4\x44\xc2\x23\xd3\xb8\x66\xc6\xea\x6d\xde\x2b\x94\xa6\x63\xef\x36\x3b\x01\xc6\xb5\xe5\x65\x51\x14\xb3\x7a\x7e\x5f\x35\x0f\xf3\xeb\x6a\xf6\x43\xb4\xe4\xd4\xa2\xb1\xd3\x5c\xdd\x2b\xd4\x96\xa1\x21\x25\x3c\x27\x00\x00\xbb\xf0\x05\x20\x76\xab\x30\x66\x41\x80\x8e\xf2\x8d\xa7\xfb\xea\xc3\x0d\x75\xc4\xa0\xf4\x50\xe5\x50\x1b\xd6\x4b\x5f\x31\xde\x3f\xa1\x21\xfc\x87\xf4\xfc\x0e\x82\x7e\x2d\x14\xca\xc6\x9f\xbc\x9a\x54\x7e\xed\x43\x56\xd5\x63\x73\xbb\xa8\x47\x59\x2f\xfa\x12\x4f\xa7\xed\xa8\x94\xc8\x8f\xe3\x51\x1a\x1d\xc3\xcf\xff\x33\x41\x69\x75\xd4\xb4\x3f\x82\x3e\xda\xe8\x3b\x00\x00\xff\xff\x5d\x4e\xb3\xae\x67\x02\x00\x00") + +func configsIndexYamlBytes() ([]byte, error) { + return bindataRead( + _configsIndexYaml, + "configs/index.yaml", + ) +} + +func configsIndexYaml() (*asset, error) { + bytes, err := configsIndexYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "configs/index.yaml", size: 615, mode: os.FileMode(420), modTime: time.Unix(1755024325, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "Dockerfile": dockerfile, + "configs/.indexignore": configsIndexignore, + "configs/index.yaml": configsIndexYaml, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... 
and data contains the +// following hierarchy: +// +// data/ +// foo.txt +// img/ +// a.png +// b.png +// +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "Dockerfile": &bintree{dockerfile, map[string]*bintree{}}, + "configs": &bintree{nil, map[string]*bintree{ + ".indexignore": &bintree{configsIndexignore, map[string]*bintree{}}, + "index.yaml": &bintree{configsIndexYaml, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} diff --git a/openshift/tests-extension/pkg/bindata/operator/operator.go b/openshift/tests-extension/pkg/bindata/operator/operator.go new file mode 100644 index 000000000..5e07f17c3 --- /dev/null +++ b/openshift/tests-extension/pkg/bindata/operator/operator.go @@ -0,0 +1,346 @@ +// Code generated for package operator by go-bindata DO NOT EDIT. 
(@generated) +// sources: +// testdata/operator/Dockerfile +// testdata/operator/manifests/registry.clusterserviceversion.yaml +// testdata/operator/metadata/annotations.yaml +// testdata/operator/metadata/properties.yaml +// testdata/operator/tests/scorecard/config.yaml +package operator + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +// Name return file name +func (fi bindataFileInfo) Name() string { + return fi.name +} + +// Size return file size +func (fi bindataFileInfo) Size() int64 { + return fi.size +} + +// Mode return file mode +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} + +// Mode return file modify time +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} + +// IsDir return file whether a directory +func (fi bindataFileInfo) IsDir() bool { + return fi.mode&os.ModeDir != 0 +} + +// Sys return file is sys mode +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _dockerfile = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x92\xc1\x6e\xa3\x40\x0c\x86\xef\x79\x0a\x4b\x7b\x5c\x65\x10\xda\x5c\x39\xec\x46\xdb\x53\xaa\x54\xbd\xf5\x54\x99\xc1\x90\x29\x13\x3c\xf2\x18\x2a\xde\xbe\x82\x00\x6d\xda\x0b\xdc\x06\xc6\xfe\xf8\xb1\xbf\x87\xe7\xf3\x23\x44\x2b\xa8\xf6\xb2\xdb\xfd\x82\x23\x0b\x41\xde\x36\x85\x27\xf0\x98\x93\x8f\x66\x77\xfa\xfb\xef\xff\x09\x38\x90\xa0\xb2\x44\x33\x9f\x4a\xc1\x2b\xbd\xb3\xd4\xc6\xb1\xb9\xf5\x98\x2b\x15\x0e\xb5\x0f\x64\xba\x34\x13\xaa\x5c\x54\xe9\x7f\x77\xe9\x36\x08\x36\xae\xa4\xa8\x71\x80\x2c\x0f\xc9\xc6\x20\x8a\x05\x2a\x8e\x88\xe9\xbc\x8d\x10\xd0\xd6\x58\xdd\xfd\xc8\xa6\x7e\x7b\xc1\xa6\x19\x06\xd8\xa5\x19\xfa\x70\xc1\x75\xdd\x57\x52\x71\x36\x9a\xbc\x75\xbe\x20\xc9\xe6\xa2\x7d\x2c\xea\x7d\x97\x9a\x3f\x07\xb3\x72\x9a\x33\xe9\x6e\x27\xd3\xcb\xd5\x2b\x99\x21\x41\xf8\x8d\xac\xbe\x7a\xec\xb9\xd5\xac\x62\x53\xb7\x39\x4d\x21\x8d\xe3\xa4\x3b\x0c\xfe\x9c\x46\x67\xa0\x64\x01\xa5\xa8\xae\xa9\x56\xfa\x33\x54\xdf\x27\x8d\x96\x85\x2c\x4a\xb1\x3a\xeb\xc8\xb0\xdc\x94\xae\x1a\x00\x3a\x5a\xb3\x60\x92\x9b\xdf\xa1\x87\xd2\x79\x8a\xa0\x0c\x9e\x2d\xaa\xe3\x26\x42\x0c\x64\x5d\xe9\xa8\x80\xbc\x5f\xc4\x3f\x9e\x9f\x5e\x60\xf1\x0f\x92\x2f\x2a\xde\xae\x26\xaf\x20\xf9\x34\x6c\xbc\xf8\xf6\x65\x48\x7e\x44\xf9\x08\x00\x00\xff\xff\xa9\xb8\xee\xcf\x78\x03\x00\x00") + +func dockerfileBytes() ([]byte, error) { + return bindataRead( + _dockerfile, + "Dockerfile", + ) +} + +func dockerfile() (*asset, error) { + bytes, err := dockerfileBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "Dockerfile", size: 888, mode: os.FileMode(420), modTime: time.Unix(1755024325, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _manifestsRegistryClusterserviceversionYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x57\x4b\x6f\xe3\xb6\x13\xbf\xfb\x53\x0c\x74\xfe\xd3\xb1\xb2\xd9\xfd\x07\x3c\x35\xfb\xc0\xa2\x40\x92\x0d\x36\x69\x0f\x2d\x8a\x62\x4c\x4e\x6c\xd6\x14\xc9\x92\x94\x13\xed\x76\xbf\x7b\x41\x3d\x6c\xc9\x52\x12\x2f\xda\x4b\x79\x91\x34\xf3\x9b\x27\x87\x9c\x11\x3a\xf5\x33\xf9\xa0\xac\xe1\x60\x1d\x79\x8c\xd6\x87\xb9\xb0\x9e\x6c\x7a\x14\x27\xdb\x1c\xb5\x5b\x63\x3e\xdb\x28\x23\x39\xbc\xd3\x65\x88\xe4\x6f\xc9\x6f\x95\xa0\x56\x74\x56\x50\x44\x89\x11\xf9\x0c\x00\x8d\xb1\x11\xa3\xb2\x26\xa4\x4f\x00\xd4\x05\xa3\x47\x2c\x9c\xa6\xc0\xe1\x2f\x56\x13\x01\x7e\x6d\x9f\x00\x5f\x77\x6f\x00\xd9\xde\xa1\x8c\x43\x66\x05\xb3\x4e\xd2\x96\xd1\x29\xcd\x3b\xff\xee\x3d\x16\xf4\x60\xfd\x66\xae\xec\xfc\x45\xc4\x2e\x82\xec\x7f\x7d\x3b\x29\x9c\x64\xe1\x33\xad\x54\x88\xbe\x1a\x72\xbb\x80\x32\x3e\xf0\x0e\x20\xd3\xb8\x24\x1d\x46\xf4\xda\x73\x37\xdf\x94\x4b\xf2\x86\x22\x85\x64\xb9\x40\x83\x2b\x92\x6c\x59\x25\x4b\x9b\x32\x44\x5b\xa8\x2f\x34\x30\xf5\x84\xa8\xc1\x82\x92\xd0\xdd\x87\xdb\x3b\xf6\xf6\xa7\xeb\xf7\x97\x1f\xb2\x81\xd4\xb7\xa1\x92\x6c\x42\x80\x85\x3a\xe9\x7d\xb9\x81\x54\x16\x1c\x89\x8c\x83\x29\xb5\xde\x91\xbf\xb5\x6f\xbf\xd5\x4f\x81\x0e\x97\x4a\xab\xa8\xd2\xde\xbd\xc5\xa0\x04\xfc\x68\x42\xc4\x56\x42\x78\xc2\x48\xf2\x22\x72\xc8\x4e\x17\xa7\xaf\xd9\xe2\x9c\x2d\xde\xdc\xe5\xff\xe7\x67\xaf\xf8\x59\xfe\x4b\x63\x7b\x5f\x58\x93\x1b\xb4\x2c\x95\x96\xe4\xf7\x05\xc8\x82\xdc\xb0\x6d\x3e\x7f\x75\x36\xcf\x8f\x51\xe0\xbc\xfd\x83\x44\xfc\x5d\x63\x65\xcb\xc8\x61\x65\xeb\x6c\xb6\x7a\xeb\x1a\x38\x9b\x01\xa4\x14\x71\xe8\x25\x68\xbe\x5d\xcc\x17\xb5\x89\xc4\x0a\x0e\x05\x71\x70\x1a\x05\xad\x6d\x92\x9c\xa5\x04\xd5\x35\xed\x54\x68\x2a\x5e\xd2\xbd\x32\xaa\xa9\x6e\xf8\x9a\x92\x25\xea\x6d\xf5\x14\x6c\xe9\xfb\xfc\x96\x2d\x29\x08\xaf\x5c\x43\xe9\xd9\xee\x33\xe6\x70\xf7\xe9\xfd\xa7\x79\x42\xab\xe0\x34\x56\xd7\x87\x9e\xce\x00\x94\xb0\x26\xf9\xc2\x60\x89\x81\xde\x9c\xd5\xe7\x0d\xb2\x26\xc3\x05\x49\x85\xb1\x72\xd4\x52\x54\xb3\x47\xcd\x01\xec\xa2\x48\x4b\x92\xd3\xb6\x2a\xc8\xc4\xd0\x91\x18\xd4\x35\xcd\x7b\x95\xf1\x5c\x29\x73\xd8\x15\xf2\xb3\x12\xa3\x6c\xf7\xd0\xc2\x9a\xe8\xad\x66\x4e\xa3\x21\xde\x7d\x6a\xf2\xac\xb1\xe3\x77\xd8\x91\x16\xf6\x0c\xb8\x1f\x68\x5a\x9e\x9c\x56\x02\x03\x87\xbc\x47\x0d\xa4\x49\x44\xeb\xf9\xe0\x00\x15\x18\xc5\xfa\xb2\x3e\xdc\xfc\xe0\x78\x1e\xef\x2d\x40\x88\x1e\x23\xad\xaa\x76\xf7\xbb\x15\xa9\x70\x1a\x23\x1d\xd8\xec\x5d\x9b\xfd\x35\xba\x42\xfb\x2b\x65\x59\x44\x7d\x90\x6d\x49\xf7\x58\xea\x58\x67\x07\x95\x49\xa7\x69\xec\x5c\x5a\x7a\x32\xc4\xef\x0b\x72\x9c\xe9\x4e\x43\x6d\x7a\xa4\x9d\x01\xfa\xd5\xa4\xcd\xa2\x40\x23\xc7\x0c\x06\x41\x13\xb9\x09\x7a\x96\x2f\x16\x8b\x6c\xc4\x50\x05\xae\x88\xc3\xb2\x0c\xd5\xd2\x3e\x8e\xd8\x5a\x6d\xc9\x50\x08\x37\xde\x2e\x69\x6c\x0e\x60\x1d\xa3\xfb\x48\x71\x8a\x05\xe0\x30\xae\x39\x9c\xac\x09\x75\x5c\x7f\x99\x86\x58\x1f\x39\x9c\x2f\xce\xf3\x09\x76\x7d\x21\xa0\x7e\x4f\x1a\xab\x5b\x12\xd6\xc8\x54\x91\xaf\x27\x90\x8e\xbc\xb2\x72\x87\x39\x5d\x8c\x30\xcd\x79\x98\xde\xda\x54\xf0\x28\xd5\x3f\x0e\x34\x69\xa9\xfe\xad\x38\x8f\x08\x33\x1f\x87\xd9\x5d\xa7\x13\x45\x93\x76\xb3\x50\x71\x92\x03\x20\x5c\xc9\xe1\xf5\x62\x51\x4c\x72\x0b\x2a\xac\xaf\x38\xe4\xa7\xe7\x57\x6a\x02\xe1\xe9\xcf\x92\xc2\xb3\xba\xf3\x17\x54\xbf\x39\x9b\xd0\x1c\x48\x94\x5e\xc5\xea\x9d\x35\x91\x1e\x27\xb3\x8f\x5a\xdb\x87\x1b\xaf\xb6\x4a\xd3\x8a\x3e\x04\x81\x1a\x9b\xa6\x71\x8f\x3a\xd0\x84\xc4\xa0\x37\x4f\xfa\x24\xbd\x75\xd3\x1c\x06\x17\x97\x97\xb3\xef\x74\xd2\x97\xe6\x22\x5c\x5b\xf3\xd9\xda\xc8\x21\xfa\xf2\xd0\xab\xb6\x47\x5e\x08\x61\x4b\x13\x9b\x26\xd6\x5e\x4c\x07\xc8\x48\xbe\x50\xa6\x0e\xf0\xa3\x47\x41\x3
7\x4f\x54\x84\x4b\xb8\x10\xfa\x37\x21\x03\x5f\xea\x7e\xc4\x2c\xb5\xe7\x8f\xde\x96\x6e\x90\x06\xd6\xf5\xc6\xd6\xf9\xa9\x8a\x62\xe9\xd2\xba\x57\xab\x02\x5d\xe8\x91\xb7\xe4\x97\x07\xb8\x15\xc5\xc1\xb7\x56\x61\x48\x78\x48\xfd\x63\xa8\xba\x9e\x8d\x06\xa4\xd2\xc9\x43\x92\x1b\xc9\x49\xd2\xd4\x03\x3d\x19\x9e\xb0\xd6\xcb\x36\x89\xf3\xcd\x79\x6a\x03\x2f\xc7\xab\x09\x03\xfd\xf7\x62\x3d\x66\x2b\x69\x9b\xa6\x9a\xe7\x43\x9b\xf0\x73\xe8\xd4\x54\x09\x1f\x31\x7d\xec\xdb\xfe\x7e\xc0\xda\x8f\x61\x57\x56\x36\xce\x32\x08\xa5\x4b\x77\x28\xc9\xfe\xb9\x6e\x06\xb7\x4f\x0f\xe6\xba\x1b\x43\x5f\xc2\xde\x2a\xb3\xd2\x74\x34\xfc\xaa\xd4\x51\x3d\x89\xde\x9d\xe4\x06\x7c\xa1\xf5\x0e\x9a\xd2\xb9\xa1\xea\xc1\x7a\xd9\x06\x30\x1c\xe8\xb4\x32\x9b\x96\x31\x3d\xef\x95\x5e\xf3\xba\xe9\x04\x7e\x72\xd2\x1f\xbd\xa5\x2d\x50\x99\x59\x1a\xbc\x54\x7f\x6e\x60\x40\x05\x2a\xcd\xa1\xb2\xa5\xff\xa1\x7e\x4f\xff\xa1\xb5\xb2\xc6\xc4\xd5\x4e\x00\x92\x9f\xb5\x8a\x58\xdf\x5c\x1c\xea\x3f\xbd\x19\x80\xf3\x76\xab\xd2\x3f\x45\x4f\xee\xa6\xa5\x75\x52\x07\xce\x25\x7b\x7b\xaf\xb6\xdd\x4f\x71\xf3\x87\xf0\x77\x00\x00\x00\xff\xff\xb7\xc0\xfc\x99\x27\x0f\x00\x00") + +func manifestsRegistryClusterserviceversionYamlBytes() ([]byte, error) { + return bindataRead( + _manifestsRegistryClusterserviceversionYaml, + "manifests/registry.clusterserviceversion.yaml", + ) +} + +func manifestsRegistryClusterserviceversionYaml() (*asset, error) { + bytes, err := manifestsRegistryClusterserviceversionYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "manifests/registry.clusterserviceversion.yaml", size: 3879, mode: os.FileMode(420), modTime: time.Unix(1755024325, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _metadataAnnotationsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x92\xbd\x6e\xac\x40\x0c\x85\x7b\x9e\xc2\xd2\x2d\xaf\xd6\x2b\x94\xad\xe8\xa2\x3c\x48\x64\x66\x0c\x3b\x01\xc6\xc8\x63\x88\x78\xfb\x28\xfc\x25\x9b\x26\x93\x0e\x83\xcf\xa7\x4f\xf8\x50\x8c\x62\x64\x41\x62\xaa\x0a\x80\x7f\xf0\x22\xca\x50\x4f\xd1\xf7\x0c\xdf\x3e\x62\x01\x20\x23\x2b\x99\x68\xc2\xe3\xa9\x51\x1a\xf8\x5d\xb4\xc3\x20\xb8\x85\x70\x60\x1f\xc8\x96\x91\x71\x2e\x2b\x50\x6e\x43\x32\x5d\xfe\xcf\x65\x3e\x81\x62\x68\x38\x59\x5a\x09\xe7\x74\xfd\x83\x82\x91\x27\xa3\x2d\xbf\x0f\xf9\xf1\x91\x5c\x47\xed\xa3\x7f\x76\xd8\xdd\x29\x46\xee\x37\x77\xea\xc7\x3b\xfd\x1e\x1d\xd8\x34\xb8\x84\xf5\x14\x7a\xcf\x5a\x9d\xfb\x97\xe4\xbb\xcb\x5c\xe2\xd3\x0d\x33\x7e\xdf\x81\x79\xbc\xc0\xfe\x36\xeb\x00\x07\x61\x54\x79\x63\x67\xaf\x3d\x2d\x32\x59\x05\xad\x60\x37\xd5\xbc\xfb\x61\x90\xeb\x7c\x2b\xd6\xba\x3c\x7f\x55\x04\x1a\x51\x30\x4e\x16\x62\x9b\x51\x97\xcf\xcd\x1f\xaa\xc9\x89\xb2\x23\xf5\x59\xb2\x2b\xc0\x49\x6c\x42\xbb\xa6\x6d\x2d\xc9\xc9\xb8\x16\x1f\x01\x00\x00\xff\xff\xdc\xc6\x0a\xf0\xdc\x02\x00\x00") + +func metadataAnnotationsYamlBytes() ([]byte, error) { + return bindataRead( + _metadataAnnotationsYaml, + "metadata/annotations.yaml", + ) +} + +func metadataAnnotationsYaml() (*asset, error) { + bytes, err := metadataAnnotationsYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "metadata/annotations.yaml", size: 732, mode: os.FileMode(420), modTime: time.Unix(1755024325, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _metadataPropertiesYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2a\x28\xca\x2f\x48\x2d\x2a\xc9\x4c\x2d\xb6\xe2\x52\x50\xd0\x55\x28\xa9\x2c\x48\xb5\x52\xc8\xcf\xc9\xd5\xcb\x4d\xac\xf0\x2f\x48\xcd\x0b\xce\xc8\x4c\x2b\x09\x4b\x2d\x2a\xce\xcc\xcf\xe3\x52\x50\x50\x50\x28\x4b\xcc\x29\x4d\xb5\x52\x50\x0a\x73\x0d\x0a\xf6\xf4\xf7\x53\xe2\x02\x04\x00\x00\xff\xff\xf3\xb7\x04\x1f\x43\x00\x00\x00") + +func metadataPropertiesYamlBytes() ([]byte, error) { + return bindataRead( + _metadataPropertiesYaml, + "metadata/properties.yaml", + ) +} + +func metadataPropertiesYaml() (*asset, error) { + bytes, err := metadataPropertiesYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "metadata/properties.yaml", size: 67, mode: os.FileMode(420), modTime: time.Unix(1755024325, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _testsScorecardConfigYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x93\x3d\x6e\xc3\x30\x0c\x85\x77\x9f\x82\x17\x50\x82\xa2\x9b\xd6\x5e\xa0\x53\x77\x46\x66\x62\xc1\xb2\xa8\x92\x54\x82\xa0\xe8\xdd\x0b\x3b\x7f\x4d\x93\xa5\x05\xea\x6c\x36\xfd\xa8\xef\x7d\x06\x84\x25\xbe\x91\x68\xe4\xec\x41\x03\x0b\x05\x94\x76\xc1\x85\x04\x8d\x65\x2d\x38\xd0\x8e\xa5\x5f\x44\x5e\x6e\x9f\x30\x95\x0e\x9f\x9b\x3e\xe6\xd6\xc3\x0b\xe7\x75\xdc\x54\x41\x8b\x9c\x9b\x81\x0c\x5b\x34\xf4\x0d\x40\xc6\x81\x3c\x84\xe9\x7b\xa3\x86\x1b\x52\xdf\x38\x28\x28\x98\x12\x25\x0f\x26\x95\x1a\x00\x23\x35\x1d\x17\x1c\x50\x36\xd9\x17\x8e\xd9\xc6\xf7\x71\x72\x2e\xe3\xc6\xd8\x71\xb8\x42\x8d\xc1\x85\x8e\x42\xef\xb4\x50\x98\xc6\x71\xc0\x0d\x79\x78\xaf\xb8\x1f\x6b\x9e\xba\xbb\x73\xf9\xe5\xf5\x59\xbe\xe6\x3e\xf3\x2e\x4f\xcb\x09\x57\x94\xf4\x00\x05\xd0\x1a\x8d\xfc\x01\x73\x1c\x4d\x1b\x37\xe0\x4b\x29\x35\x96\x91\x7f\x3a\xa1\x50\x38\x3d\x03\x0c\x5c\xb3\xbd\xa2\x75\x1e\x3e\x3e\x7f\x23\xca\x69\x70\xab\x9a\xdb\x44\x6e\x8b\x29\xb6\x87\x9f\xfc\x3f\xb6\x9c\x86\x2b\xd7\xbb\xec\x39\x84\x83\xb4\xea\x3a\xdc\x3e\xc8\xf9\x1e\x7e\x5e\x6d\x21\xe5\x2a\x81\xf4\x21\xd6\x67\xfa\x1c\xd2\xd3\x25\x6a\x49\x83\xc4\x62\x2c\x73\x1a\xff\x44\xcf\xa2\x6b\x68\x55\x1f\x25\x7c\x03\xff\x93\xf2\xb7\xe8\x25\x76\x1d\xf9\x0a\x00\x00\xff\xff\xb8\xc2\xd4\xda\x4e\x06\x00\x00") + +func testsScorecardConfigYamlBytes() ([]byte, error) { + return bindataRead( + _testsScorecardConfigYaml, + "tests/scorecard/config.yaml", + ) +} + +func testsScorecardConfigYaml() (*asset, error) { + bytes, err := testsScorecardConfigYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "tests/scorecard/config.yaml", size: 1614, mode: os.FileMode(420), modTime: time.Unix(1755024325, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. 
+// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "Dockerfile": dockerfile, + "manifests/registry.clusterserviceversion.yaml": manifestsRegistryClusterserviceversionYaml, + "metadata/annotations.yaml": metadataAnnotationsYaml, + "metadata/properties.yaml": metadataPropertiesYaml, + "tests/scorecard/config.yaml": testsScorecardConfigYaml, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// +// data/ +// foo.txt +// img/ +// a.png +// b.png +// +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "Dockerfile": &bintree{dockerfile, map[string]*bintree{}}, + "manifests": &bintree{nil, map[string]*bintree{ + "registry.clusterserviceversion.yaml": &bintree{manifestsRegistryClusterserviceversionYaml, map[string]*bintree{}}, + }}, + "metadata": &bintree{nil, map[string]*bintree{ + "annotations.yaml": &bintree{metadataAnnotationsYaml, map[string]*bintree{}}, + "properties.yaml": &bintree{metadataPropertiesYaml, map[string]*bintree{}}, + }}, + "tests": &bintree{nil, map[string]*bintree{ + "scorecard": &bintree{nil, map[string]*bintree{ + "config.yaml": &bintree{testsScorecardConfigYaml, map[string]*bintree{}}, + }}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory 
recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} diff --git a/openshift/tests-extension/pkg/env/cluster.go b/openshift/tests-extension/pkg/env/cluster.go index 7328ca610..f5129aee6 100644 --- a/openshift/tests-extension/pkg/env/cluster.go +++ b/openshift/tests-extension/pkg/env/cluster.go @@ -1,10 +1,16 @@ package env import ( + "context" + "fmt" "log" "os" + bsemver "github.com/blang/semver/v4" + buildv1 "github.com/openshift/api/build/v1" configv1 "github.com/openshift/api/config/v1" + imagev1 "github.com/openshift/api/image/v1" + operatorv1 "github.com/openshift/api/operator/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -32,6 +38,9 @@ type TestEnv struct { // True if the cluster is detected as an OpenShift environment IsOpenShift bool + + // Set to the MAJOR.MINOR version of OpenShift, blank otherwise + OpenShiftVersion string } // testEnv is the global shared instance used by all tests. @@ -80,7 +89,10 @@ func initTestEnv() *TestEnv { utilruntime.Must(olmv1.AddToScheme(scheme)) if isOcp { + utilruntime.Must(buildv1.AddToScheme(scheme)) utilruntime.Must(configv1.AddToScheme(scheme)) + utilruntime.Must(imagev1.AddToScheme(scheme)) + utilruntime.Must(operatorv1.AddToScheme(scheme)) } k8sClient, err := crclient.New(cfg, crclient.Options{Scheme: scheme}) @@ -88,15 +100,31 @@ func initTestEnv() *TestEnv { log.Fatalf("failed to create controller-runtime client: %v", err) } + version := "" if isOcp { extlogs.Infof("[env] Cluster environment initialized (OpenShift: %t)", isOcp) + version = getOcpVersion(k8sClient) } return &TestEnv{ - RestCfg: cfg, - K8sClient: k8sClient, - IsOpenShift: isOcp, + RestCfg: cfg, + K8sClient: k8sClient, + IsOpenShift: isOcp, + OpenShiftVersion: version, + } +} + +func getOcpVersion(c crclient.Client) string { + cv := &configv1.ClusterVersion{} + err := c.Get(context.Background(), crclient.ObjectKey{Name: "version"}, cv) + if err != nil { + return "" + } + v, err := bsemver.Parse(cv.Status.Desired.Version) + if err != nil { + return "" } + return fmt.Sprintf("%d.%d", v.Major, v.Minor) } // getRestConfig returns a Kubernetes REST config for the test client. diff --git a/openshift/tests-extension/pkg/helpers/cluster_extension.go b/openshift/tests-extension/pkg/helpers/cluster_extension.go index df4a823ea..4e9856cf8 100644 --- a/openshift/tests-extension/pkg/helpers/cluster_extension.go +++ b/openshift/tests-extension/pkg/helpers/cluster_extension.go @@ -26,10 +26,12 @@ import ( // CreateClusterExtension creates a ServiceAccount, ClusterRoleBinding, and ClusterExtension using typed APIs. // It returns the unique suffix and a cleanup function. 
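+// An empty unique value asks the helper to generate a random 4-character suffix,
+// e.g. name, cleanup := CreateClusterExtension("quay-operator", "3.13.0", ns, "");
+// pass a non-empty value to share one suffix across related resources.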
-func CreateClusterExtension(packageName, version, namespace string) (string, func()) {
+func CreateClusterExtension(packageName, version, namespace, unique string) (string, func()) {
 	ctx := context.TODO()
 	k8sClient := env.Get().K8sClient
-	unique := rand.String(4)
+	if unique == "" {
+		unique = rand.String(4)
+	}
 	saName := "install-test-sa-" + unique
 	crbName := "install-test-crb-" + unique
@@ -54,6 +56,7 @@ func CreateClusterExtension(packageName, version, namespace string) (string, fun
 	// Cleanup closure
 	return ceName, func() {
+		By("deleting ClusterExtension, ClusterRoleBinding and ServiceAccount")
 		_ = k8sClient.Delete(ctx, ce)
 		_ = k8sClient.Delete(ctx, crb)
 		_ = k8sClient.Delete(ctx, sa)
diff --git a/openshift/tests-extension/test/olmv1-incompatible.go b/openshift/tests-extension/test/olmv1-incompatible.go
new file mode 100644
index 000000000..2ada48893
--- /dev/null
+++ b/openshift/tests-extension/test/olmv1-incompatible.go
@@ -0,0 +1,448 @@
+package test
+
+import (
+	"archive/tar"
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+	"time"
+
+	//nolint:staticcheck // ST1001: dot-imports for readability
+	. "github.com/onsi/ginkgo/v2"
+	//nolint:staticcheck // ST1001: dot-imports for readability
+	. "github.com/onsi/gomega"
+
+	buildv1 "github.com/openshift/api/build/v1"
+	configv1 "github.com/openshift/api/config/v1"
+	imagev1 "github.com/openshift/api/image/v1"
+	operatorv1 "github.com/openshift/api/operator/v1"
+	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/rand"
+	"k8s.io/utils/ptr"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	olmv1 "github.com/operator-framework/operator-controller/api/v1"
+
+	catalogdata "github/operator-framework-operator-controller/openshift/tests-extension/pkg/bindata/catalog"
+	operatordata "github/operator-framework-operator-controller/openshift/tests-extension/pkg/bindata/operator"
+	"github/operator-framework-operator-controller/openshift/tests-extension/pkg/env"
+	"github/operator-framework-operator-controller/openshift/tests-extension/pkg/helpers"
+)
+
+var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 operator installation", func() {
+	BeforeEach(func() {
+		helpers.RequireOLMv1CapabilityOnOpenshift()
+	})
+	unique := rand.String(8)
+	nsName := "install-test-ns-" + unique
+	ccName := "install-test-cc-" + unique
+	rbName := "install-test-rb-" + unique
+	opName := "install-test-op-" + unique
+	It("should block cluster upgrades if an incompatible operator is installed", func(ctx SpecContext) {
+		if !env.Get().IsOpenShift {
+			Skip("Requires OCP APIs: not OpenShift")
+		}
+
+		By(fmt.Sprintf("setting a unique value: %q", unique))
+
+		testVersion := env.Get().OpenShiftVersion
+		replacements := map[string]string{
+			"TEST-BUNDLE": opName,
+			"NAMESPACE":   nsName,
+			"VERSION":     testVersion,
+		}
+		By(fmt.Sprintf("testing against OCP %s", testVersion))
+
+		By("finding a k8s client")
+		cmdLine, err := getK8sCommandLineClient()
+		Expect(err).To(Succeed())
+
+		By("creating a new Namespace")
+		nsCleanup := createNamespace(nsName)
+		DeferCleanup(nsCleanup)
+
+		By("applying image-puller RoleBinding")
+		rbCleanup := createImagePullerRoleBinding(rbName, nsName)
+		DeferCleanup(rbCleanup)
+
+		By("creating the operator BuildConfig")
+		bcCleanup := createBuildConfig(opName, nsName)
+		DeferCleanup(bcCleanup)
+
+		By("creating the operator ImageStream")
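+		// The ImageStream below receives the operator image pushed by the binary build started later in this test.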
isCleanup := createImageStream(opName, nsName) + DeferCleanup(isCleanup) + + By("creating the operator tarball") + fileOperator, fileCleanup := createTempTarBall(replacements, operatordata.AssetNames, operatordata.Asset) + DeferCleanup(fileCleanup) + By(fmt.Sprintf("created operator tarball %q", fileOperator)) + + By(fmt.Sprintf("starting the operator build with %q via RAW URL", cmdLine)) + opArgs := []string{ + "create", + "--raw", + fmt.Sprintf( + "/apis/build.openshift.io/v1/namespaces/%s/buildconfigs/%s/instantiatebinary?name=%s&namespace=%s", + nsName, opName, opName, nsName, + ), + "-f", + fileOperator, + } + buildOperator := startBuild(cmdLine, opArgs...) + + By(fmt.Sprintf("waiting for the build %q to finish", buildOperator.Name)) + waitForBuildToFinish(ctx, buildOperator.Name, nsName) + + By("creating the catalog BuildConfig") + bcCleanup = createBuildConfig(ccName, nsName) + DeferCleanup(bcCleanup) + + By("creating the catalog ImageStream") + isCleanup = createImageStream(ccName, nsName) + DeferCleanup(isCleanup) + + By("creating the catalog tarball") + fileCatalog, fileCleanup := createTempTarBall(replacements, catalogdata.AssetNames, catalogdata.Asset) + DeferCleanup(fileCleanup) + By(fmt.Sprintf("created catalog tarball %q", fileCatalog)) + + By(fmt.Sprintf("starting the catalog build with %q via RAW URL", cmdLine)) + catalogArgs := []string{ + "create", + "--raw", + fmt.Sprintf( + "/apis/build.openshift.io/v1/namespaces/%s/buildconfigs/%s/instantiatebinary?name=%s&namespace=%s", + nsName, ccName, ccName, nsName, + ), + "-f", + fileCatalog, + } + buildCatalog := startBuild(cmdLine, catalogArgs...) + + By(fmt.Sprintf("waiting for the build %q to finish", buildCatalog.Name)) + waitForBuildToFinish(ctx, buildCatalog.Name, nsName) + + By("creating the ClusterCatalog") + ccCleanup := createClusterCatalog(ccName, nsName) + DeferCleanup(ccCleanup) + + By("waiting for InstalledOLMOperatorUpgradable to be true") + waitForOlmUpgradeStatus(ctx, operatorv1.ConditionTrue, "") + + By("creating the ClusterExtension") + ceName, ceCleanup := helpers.CreateClusterExtension(opName, "", nsName, unique) + DeferCleanup(ceCleanup) + helpers.ExpectClusterExtensionToBeInstalled(ctx, ceName) + + By("waiting for InstalledOLMOperatorUpgradable to be false") + waitForOlmUpgradeStatus(ctx, operatorv1.ConditionFalse, ceName) + + By("waiting for ClusterOperator Upgradeable to be false") + waitForClusterOperatorUpgradable(ctx, ceName) + }) +}) + +func createClusterCatalog(name, namespace string) func() { + ctx := context.Background() + k8sClient := env.Get().K8sClient + + cc := &olmv1.ClusterCatalog{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: olmv1.ClusterCatalogSpec{ + AvailabilityMode: olmv1.AvailabilityModeAvailable, + Priority: 0, + Source: olmv1.CatalogSource{ + Type: olmv1.SourceTypeImage, + Image: &olmv1.ImageSource{ + PollIntervalMinutes: ptr.To(int(600)), + Ref: fmt.Sprintf("image-registry.openshift-image-registry.svc:5000/%s/%s:latest", namespace, name), + }, + }, + }, + } + + Expect(k8sClient.Create(ctx, cc)).To(Succeed(), "failed to create ClusterCatalog") + waitForClusterCatalogServing(ctx, cc.Name) + return func() { + By(fmt.Sprintf("deleting ClusterCatalog %q", name)) + Expect(k8sClient.Delete(context.Background(), cc)).To(Succeed()) + } +} + +func createImagePullerRoleBinding(name, namespace string) func() { + ctx := context.Background() + k8sClient := env.Get().K8sClient + + rb := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, 
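+		// Reference the built-in system:image-puller ClusterRole; the subjects below
+		// let the catalogd and operator-controller service accounts pull from this
+		// namespace's internal registry.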
+ RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "system:image-puller", + }, + Subjects: []rbacv1.Subject{ + { + APIGroup: "rbac.authorization.k8s.io", + Kind: "Group", + Name: "system:serviceaccounts:openshift-catalogd", + }, + { + APIGroup: "rbac.authorization.k8s.io", + Kind: "Group", + Name: "system:serviceaccounts:openshift-operator-controller", + }, + }, + } + Expect(k8sClient.Create(ctx, rb)).To(Succeed(), "failed to create image-puller RoleBinding") + return func() { + By(fmt.Sprintf("deleting image-puller RoleBinding %q", name)) + _ = k8sClient.Delete(ctx, rb) + } +} + +func createNamespace(namespace string) func() { + ctx := context.Background() + k8sClient := env.Get().K8sClient + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + + Expect(k8sClient.Create(ctx, ns)).To(Succeed(), "failed to create Namespace: %q", namespace) + return func() { + By(fmt.Sprintf("deleting Namespace %q", namespace)) + _ = k8sClient.Delete(context.Background(), ns) + } +} + +func createImageStream(name, namespace string) func() { + ctx := context.Background() + k8sClient := env.Get().K8sClient + + is := &imagev1.ImageStream{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + "name": name, + }, + }, + } + + Expect(k8sClient.Create(ctx, is)).To(Succeed(), "failed to create ImageStream: %q", name) + return func() { + By(fmt.Sprintf("deleting ImageStream %q", name)) + _ = k8sClient.Delete(context.Background(), is) + } +} + +func createBuildConfig(name, namespace string) func() { + ctx := context.Background() + k8sClient := env.Get().K8sClient + + bc := &buildv1.BuildConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + "name": name, + }, + }, + Spec: buildv1.BuildConfigSpec{ + CommonSpec: buildv1.CommonSpec{ + Source: buildv1.BuildSource{ + Type: buildv1.BuildSourceBinary, + }, + Strategy: buildv1.BuildStrategy{ + Type: buildv1.DockerBuildStrategyType, + DockerStrategy: &buildv1.DockerBuildStrategy{ + ForcePull: true, + From: &corev1.ObjectReference{ + Kind: "DockerImage", + Name: "scratch", + }, + Env: []corev1.EnvVar{ + { + Name: "BUILD_LOGLEVEL", + Value: "5", + }, + }, + }, + }, + Output: buildv1.BuildOutput{ + To: &corev1.ObjectReference{ + Kind: "ImageStreamTag", + Name: name + ":latest", + }, + }, + }, + }, + } + + Expect(k8sClient.Create(ctx, bc)).To(Succeed(), "failed to create BuildConfig: %q", name) + return func() { + By(fmt.Sprintf("deleting BuildConfig %q", name)) + _ = k8sClient.Delete(context.Background(), bc) + } +} + +func waitForBuildToFinish(ctx SpecContext, name, namespace string) { + const typeBuildConditionComplete = "Complete" + k8sClient := env.Get().K8sClient + Eventually(func(g Gomega) { + b := &buildv1.Build{} + err := k8sClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, b) + g.Expect(err).ToNot(HaveOccurred()) + + conditions := b.Status.Conditions + var cond *buildv1.BuildCondition + for i := range conditions { + if conditions[i].Type == typeBuildConditionComplete { + cond = &conditions[i] + break + } + } + g.Expect(cond).ToNot(BeNil()) + g.Expect(cond.Status).To(Equal(corev1.ConditionTrue)) + }).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed()) +} + +func waitForClusterCatalogServing(ctx context.Context, name string) { + k8sClient := env.Get().K8sClient + Eventually(func(g Gomega) { + cc := &olmv1.ClusterCatalog{} + err := 
k8sClient.Get(ctx, client.ObjectKey{Name: name}, cc) + g.Expect(err).ToNot(HaveOccurred()) + + serving := meta.FindStatusCondition(cc.Status.Conditions, olmv1.TypeServing) + g.Expect(serving).ToNot(BeNil()) + g.Expect(serving.Status).To(Equal(metav1.ConditionTrue)) + }).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed()) +} + +func waitForOlmUpgradeStatus(ctx SpecContext, status operatorv1.ConditionStatus, name string) { + const reasonIncompatibleOperatorsInstalled = "IncompatibleOperatorsInstalled" + const typeInstalledOLMOperatorsUpgradeable = "InstalledOLMOperatorsUpgradeable" + k8sClient := env.Get().K8sClient + Eventually(func(g Gomega) { + olm := &operatorv1.OLM{} + err := k8sClient.Get(ctx, client.ObjectKey{Name: "cluster"}, olm) + g.Expect(err).ToNot(HaveOccurred()) + + conditions := olm.Status.Conditions + var cond *operatorv1.OperatorCondition + for i := range conditions { + if conditions[i].Type == typeInstalledOLMOperatorsUpgradeable { + cond = &conditions[i] + break + } + } + g.Expect(cond).ToNot(BeNil(), "missing condition: %q", typeInstalledOLMOperatorsUpgradeable) + g.Expect(cond.Status).To(Equal(status)) + if status == operatorv1.ConditionFalse { + g.Expect(name).ToNot(BeEmpty()) + g.Expect(cond.Reason).To(Equal(reasonIncompatibleOperatorsInstalled)) + g.Expect(cond.Message).To(ContainSubstring(name)) + } + }).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed()) +} + +func waitForClusterOperatorUpgradable(ctx SpecContext, name string) { + const reasonIncompatibleOperatorsInstalled = "InstalledOLMOperators_IncompatibleOperatorsInstalled" + + Eventually(func(g Gomega) { + k8sClient := env.Get().K8sClient + obj := &configv1.ClusterOperator{} + err := k8sClient.Get(ctx, client.ObjectKey{Name: "olm"}, obj) + g.Expect(err).ToNot(HaveOccurred()) + + var cond *configv1.ClusterOperatorStatusCondition + for i, c := range obj.Status.Conditions { + if c.Type == configv1.OperatorUpgradeable { + cond = &obj.Status.Conditions[i] + break + } + } + + g.Expect(cond).ToNot(BeNil(), "missing condition: %q", configv1.OperatorUpgradeable) + g.Expect(cond.Status).To(Equal(configv1.ConditionFalse)) + g.Expect(cond.Reason).To(Equal(reasonIncompatibleOperatorsInstalled)) + g.Expect(cond.Message).To(ContainSubstring(name)) + }).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed()) +} + +func getK8sCommandLineClient() (string, error) { + s, err := exec.LookPath("kubectl") + if err != nil { + s, err = exec.LookPath("oc") + } + return s, err +} + +func startBuild(cmdLine string, args ...string) *buildv1.Build { + cmd := exec.Command(cmdLine, args...) 
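+	// cmd.Output captures stdout only (the created Build object as JSON);
+	// on failure, stderr is recovered from the ExitError by printExitError.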
+	output, err := cmd.Output()
+	Expect(err).To(Succeed(), printExitError(err))
+	// The output is JSON of a build.build.openshift.io resource.
+	build := &buildv1.Build{}
+	Expect(json.Unmarshal(output, build)).To(Succeed(), "failed to unmarshal build")
+	return build
+}
+
+func printExitError(err error) string {
+	if err == nil {
+		return ""
+	}
+	exiterr := &exec.ExitError{}
+	if errors.As(err, &exiterr) {
+		return fmt.Sprintf("ExitError.Stderr: %q", string(exiterr.Stderr))
+	}
+	return err.Error()
+}
+
+func createTempTarBall(replacements map[string]string, getAssetNames func() []string, getAsset func(string) ([]byte, error)) (string, func()) {
+	file, err := os.CreateTemp("", "bundle-*.tar")
+	Expect(err).To(Succeed())
+	filename := file.Name()
+
+	names := getAssetNames()
+	tw := tar.NewWriter(file)
+	for _, name := range names {
+		data, err := getAsset(name)
+		Expect(err).To(Succeed())
+		for k, v := range replacements {
+			data = bytes.ReplaceAll(data, []byte(k), []byte(v))
+		}
+		hdr := &tar.Header{
+			Name: name,
+			Size: int64(len(data)),
+			Mode: 0o644,
+		}
+		err = tw.WriteHeader(hdr)
+		Expect(err).To(Succeed())
+		_, err = tw.Write(data)
+		Expect(err).To(Succeed())
+	}
+	Expect(tw.Close()).To(Succeed(), "failed to close tar writer for file %q", filename)
+	Expect(file.Close()).To(Succeed(), "failed to close tar file %q", filename)
+
+	return filename, func() {
+		By(fmt.Sprintf("deleting file %q", filename))
+		Expect(os.Remove(filename)).To(Succeed())
+	}
+}
diff --git a/openshift/tests-extension/test/olmv1.go b/openshift/tests-extension/test/olmv1.go
index df7138be2..d871eb3c8 100644
--- a/openshift/tests-extension/test/olmv1.go
+++ b/openshift/tests-extension/test/olmv1.go
@@ -10,7 +10,6 @@ import (
 	//nolint:staticcheck // ST1001: dot-imports for readability
 	. 
"github.com/onsi/gomega" - configv1 "github.com/openshift/api/config/v1" corev1 "k8s.io/api/core/v1" apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/meta" @@ -103,7 +102,7 @@ var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 helpers.EnsureCleanupClusterExtension(context.Background(), "quay-operator", "quayregistries.quay.redhat.com") By("applying the ClusterExtension resource") - name, cleanup := helpers.CreateClusterExtension("quay-operator", "3.13.0", namespace) + name, cleanup := helpers.CreateClusterExtension("quay-operator", "3.13.0", namespace, "") DeferCleanup(cleanup) By("waiting for the quay-operator ClusterExtension to be installed") @@ -115,7 +114,7 @@ var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 helpers.EnsureCleanupClusterExtension(context.Background(), "does-not-exist", "") // No CRD expected for non-existing operator By("applying the ClusterExtension resource") - name, cleanup := helpers.CreateClusterExtension("does-not-exist", "99.99.99", namespace) + name, cleanup := helpers.CreateClusterExtension("does-not-exist", "99.99.99", namespace, "") DeferCleanup(cleanup) By("waiting for the ClusterExtension to exist") @@ -143,57 +142,4 @@ var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 g.Expect(installed.Message).To(Equal("No bundle installed")) }).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed()) }) - - It("should block cluster upgrades if an incompatible operator is installed", func(ctx SpecContext) { - if !env.Get().IsOpenShift { - Skip("Requires OCP Catalogs: not OpenShift") - } - - By("ensuring no ClusterExtension no ClusterExtension and CRD for cluster-logging") - helpers.EnsureCleanupClusterExtension(context.Background(), "cluster-logging", "clusterloggings.logging.openshift.io") - - By("applying the ClusterExtension resource") - name, cleanup := helpers.CreateClusterExtension("cluster-logging", "6.2.2", namespace) - DeferCleanup(cleanup) - - By("waiting for the cluster-logging ClusterExtension to be installed") - Eventually(func(g Gomega) { - k8sClient := env.Get().K8sClient - ce := &olmv1.ClusterExtension{} - err := k8sClient.Get(ctx, client.ObjectKey{Name: name}, ce) - g.Expect(err).ToNot(HaveOccurred()) - - progressing := meta.FindStatusCondition(ce.Status.Conditions, olmv1.TypeProgressing) - g.Expect(progressing).ToNot(BeNil()) - g.Expect(progressing.Status).To(Equal(metav1.ConditionTrue)) - - installed := meta.FindStatusCondition(ce.Status.Conditions, olmv1.TypeInstalled) - g.Expect(installed).ToNot(BeNil()) - g.Expect(installed.Status).To(Equal(metav1.ConditionTrue)) - }).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed()) - - By("ensuring the cluster is not upgradeable when olm.maxopenshiftversion is specified") - const typeUpgradeable = "Upgradeable" - const reasonIncompatibleOperatorsInstalled = "InstalledOLMOperators_IncompatibleOperatorsInstalled" - - Eventually(func(g Gomega) { - k8sClient := env.Get().K8sClient - obj := &configv1.ClusterOperator{} - err := k8sClient.Get(ctx, client.ObjectKey{Name: "olm"}, obj) - g.Expect(err).ToNot(HaveOccurred()) - - var cond *configv1.ClusterOperatorStatusCondition - for i, c := range obj.Status.Conditions { - if c.Type == typeUpgradeable { - cond = &obj.Status.Conditions[i] - break - } - } - - g.Expect(cond).ToNot(BeNil(), "missing condition: %q", typeUpgradeable) - 
g.Expect(cond.Status).To(Equal(configv1.ConditionFalse)) - g.Expect(cond.Reason).To(Equal(reasonIncompatibleOperatorsInstalled)) - g.Expect(cond.Message).To(ContainSubstring(name)) - }).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed()) - }) }) diff --git a/openshift/tests-extension/testdata/catalog/Dockerfile b/openshift/tests-extension/testdata/catalog/Dockerfile new file mode 100644 index 000000000..e865e9523 --- /dev/null +++ b/openshift/tests-extension/testdata/catalog/Dockerfile @@ -0,0 +1,3 @@ +FROM scratch +ADD configs /configs +LABEL operators.operatorframework.io.index.configs.v1=/configs diff --git a/openshift/tests-extension/testdata/catalog/configs/.indexignore b/openshift/tests-extension/testdata/catalog/configs/.indexignore new file mode 100644 index 000000000..931249a81 --- /dev/null +++ b/openshift/tests-extension/testdata/catalog/configs/.indexignore @@ -0,0 +1 @@ +..* diff --git a/openshift/tests-extension/testdata/catalog/configs/index.yaml b/openshift/tests-extension/testdata/catalog/configs/index.yaml new file mode 100644 index 000000000..b2e808dc7 --- /dev/null +++ b/openshift/tests-extension/testdata/catalog/configs/index.yaml @@ -0,0 +1,33 @@ +{ + "schema": "olm.package", + "name": "TEST-BUNDLE" +} +{ + "schema": "olm.bundle", + "name": "TEST-BUNDLE.v0.0.1", + "package": "TEST-BUNDLE", + "image": "image-registry.openshift-image-registry.svc:5000/NAMESPACE/TEST-BUNDLE:latest", + "properties": [ + { + "type": "olm.package", + "value": { + "packageName": "TEST-BUNDLE", + "version": "0.0.1" + } + }, + { + "type": "olm.maxOpenShiftVersion", + "value": "VERSION" + } + ] +} +{ + "schema": "olm.channel", + "name": "preview", + "package": "TEST-BUNDLE", + "entries": [ + { + "name": "TEST-BUNDLE.v0.0.1" + } + ] +} diff --git a/openshift/tests-extension/testdata/operator/Dockerfile b/openshift/tests-extension/testdata/operator/Dockerfile new file mode 100644 index 000000000..3d4cd3454 --- /dev/null +++ b/openshift/tests-extension/testdata/operator/Dockerfile @@ -0,0 +1,20 @@ +FROM scratch + +# Core bundle labels. +LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 +LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ +LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ +LABEL operators.operatorframework.io.bundle.package.v1=registry +LABEL operators.operatorframework.io.bundle.channels.v1=alpha +LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.34.1 +LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 +LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4 + +# Labels for testing. +LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 +LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ + +# Copy files to locations specified by labels. 
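+# The destination paths below must match the manifests.v1, metadata.v1 and
+# test.config.v1 label values declared above.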
+COPY manifests /manifests/ +COPY metadata /metadata/ +COPY tests/scorecard /tests/scorecard/ diff --git a/openshift/tests-extension/testdata/operator/manifests/registry.clusterserviceversion.yaml b/openshift/tests-extension/testdata/operator/manifests/registry.clusterserviceversion.yaml new file mode 100644 index 000000000..1e38c8d51 --- /dev/null +++ b/openshift/tests-extension/testdata/operator/manifests/registry.clusterserviceversion.yaml @@ -0,0 +1,146 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "oc-opdev-e2e.operatorframework.io.oc-opdev-e2e.operatorframework.io/v1alpha1", + "kind": "Registry", + "metadata": { + "labels": { + "app.kubernetes.io/managed-by": "kustomize", + "app.kubernetes.io/name": "TEST-BUNDLE" + }, + "name": "TEST-BUNDLE-sample" + }, + "spec": null + } + ] + capabilities: Basic Install + createdAt: "2025-08-06T17:43:41Z" + operators.operatorframework.io/builder: operator-sdk-v1.34.1 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v4 + name: TEST-BUNDLE.v0.0.1 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinition: {} + description: TEST-BUNDLE description. TODO. + displayName: TEST-BUNDLE + icon: + - base64data: "" + mediatype: "" + install: + spec: + deployments: + - label: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: TEST-BUNDLE + control-plane: controller-manager + name: TEST-BUNDLE-controller-manager + spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + strategy: {} + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + containers: + - args: + command: + - sleep + - "1000" + image: busybox + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: default + terminationGracePeriodSeconds: 10 + permissions: + - rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + serviceAccountName: TEST-BUNDLE-controller-manager + strategy: deployment + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - TEST-BUNDLE + links: + - name: TEST-BUNDLE + url: https://TEST-BUNDLE.domain + maintainers: + - email: your@email.com + name: Maintainer Name + maturity: alpha + provider: + name: Provider Name + url: https://your.domain + version: 0.0.1 diff --git a/openshift/tests-extension/testdata/operator/metadata/annotations.yaml b/openshift/tests-extension/testdata/operator/metadata/annotations.yaml new file mode 100644 index 000000000..098f80652 --- /dev/null +++ b/openshift/tests-extension/testdata/operator/metadata/annotations.yaml @@ -0,0 +1,14 @@ 
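+# These annotations mirror the labels in testdata/operator/Dockerfile; keep the two in sync.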
+annotations: + # Core bundle annotations. + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: registry + operators.operatorframework.io.bundle.channels.v1: alpha + operators.operatorframework.io.metrics.builder: operator-sdk-v1.34.1 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4 + + # Annotations for testing. + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operators.operatorframework.io.test.config.v1: tests/scorecard/ diff --git a/openshift/tests-extension/testdata/operator/metadata/properties.yaml b/openshift/tests-extension/testdata/operator/metadata/properties.yaml new file mode 100644 index 000000000..a79cdbd24 --- /dev/null +++ b/openshift/tests-extension/testdata/operator/metadata/properties.yaml @@ -0,0 +1,3 @@ +properties: + - type: olm.maxOpenShiftVersion + value: "VERSION" diff --git a/openshift/tests-extension/testdata/operator/tests/scorecard/config.yaml b/openshift/tests-extension/testdata/operator/tests/scorecard/config.yaml new file mode 100644 index 000000000..37b555ed2 --- /dev/null +++ b/openshift/tests-extension/testdata/operator/tests/scorecard/config.yaml @@ -0,0 +1,70 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: + - entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: basic + test: basic-check-spec-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: olm + test: olm-bundle-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: olm + test: olm-crds-have-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: olm + test: olm-crds-have-resources-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: olm + test: olm-spec-descriptors-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: olm + test: olm-status-descriptors-test + storage: + spec: + mountPath: {} +storage: + spec: + mountPath: {} diff --git a/openshift/tests-extension/vendor/github.com/blang/semver/v4/LICENSE b/openshift/tests-extension/vendor/github.com/blang/semver/v4/LICENSE new file mode 100644 index 000000000..5ba5c86fc --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/blang/semver/v4/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or 
sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/openshift/tests-extension/vendor/github.com/blang/semver/v4/json.go b/openshift/tests-extension/vendor/github.com/blang/semver/v4/json.go new file mode 100644 index 000000000..a74bf7c44 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/blang/semver/v4/json.go @@ -0,0 +1,23 @@ +package semver + +import ( + "encoding/json" +) + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. +func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/openshift/tests-extension/vendor/github.com/blang/semver/v4/range.go b/openshift/tests-extension/vendor/github.com/blang/semver/v4/range.go new file mode 100644 index 000000000..95f7139b9 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/blang/semver/v4/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. +func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// range, err := semver.ParseRange(">1.0.0 <2.0.0") +// range(semver.MustParse("1.1.1") // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. 
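+//
+// For example:
+//
+//	wide := MustParseRange("<1.0.0").OR(MustParseRange(">=2.0.0"))
+//	wide(MustParse("1.5.0")) // false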
+func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed an error is returned. +// +// Valid ranges are: +// - "<1.0.0" +// - "<=1.0.0" +// - ">1.0.0" +// - ">=1.0.0" +// - "1.0.0", "=1.0.0", "==1.0.0" +// - "!1.0.0", "!=1.0.0" +// +// A Range can consist of multiple ranges separated by space: +// Ranges can be linked by logical AND: +// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" +// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 +// +// Ranges can also be linked by logical OR: +// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" +// +// AND has a higher precedence than OR. It's not possible to use brackets. +// +// Ranges can be combined by both AND and OR +// +// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` +func ParseRange(s string) (Range, error) { + parts := splitAndTrim(s) + orParts, err := splitORParts(parts) + if err != nil { + return nil, err + } + expandedParts, err := expandWildcardVersion(orParts) + if err != nil { + return nil, err + } + var orFn Range + for _, p := range expandedParts { + var andFn Range + for _, ap := range p { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + vr, err := buildVersionRange(opStr, vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) + } + rf := vr.rangeFunc() + + // Set function + if andFn == nil { + andFn = rf + } else { // Combine with existing function + andFn = andFn.AND(rf) + } + } + if orFn == nil { + orFn = andFn + } else { + orFn = orFn.OR(andFn) + } + + } + return orFn, nil +} + +// splitORParts splits the already cleaned parts by '||'. +// Checks for invalid positions of the operator and returns an +// error if found. +func splitORParts(parts []string) ([][]string, error) { + var ORparts [][]string + last := 0 + for i, p := range parts { + if p == "||" { + if i == 0 { + return nil, fmt.Errorf("First element in range is '||'") + } + ORparts = append(ORparts, parts[last:i]) + last = i + 1 + } + } + if last == len(parts) { + return nil, fmt.Errorf("Last element in range is '||'") + } + ORparts = append(ORparts, parts[last:]) + return ORparts, nil +} + +// buildVersionRange takes a slice of 2: operator and version +// and builds a versionRange, otherwise an error. 
+func buildVersionRange(opStr, vStr string) (*versionRange, error) { + c := parseComparator(opStr) + if c == nil { + return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) + } + v, err := Parse(vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) + } + + return &versionRange{ + v: v, + c: c, + }, nil + +} + +// inArray checks if a byte is contained in an array of bytes +func inArray(s byte, list []byte) bool { + for _, el := range list { + if el == s { + return true + } + } + return false +} + +// splitAndTrim splits a range string by spaces and cleans whitespaces +func splitAndTrim(s string) (result []string) { + last := 0 + var lastChar byte + excludeFromSplit := []byte{'>', '<', '='} + for i := 0; i < len(s); i++ { + if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { + if last < i-1 { + result = append(result, s[last:i]) + } + last = i + 1 + } else if s[i] != ' ' { + lastChar = s[i] + } + } + if last < len(s)-1 { + result = append(result, s[last:]) + } + + for i, v := range result { + result[i] = strings.Replace(v, " ", "", -1) + } + + // parts := strings.Split(s, " ") + // for _, x := range parts { + // if s := strings.TrimSpace(x); len(s) != 0 { + // result = append(result, s) + // } + // } + return +} + +// splitComparatorVersion splits the comparator from the version. +// Input must be free of leading or trailing spaces. +func splitComparatorVersion(s string) (string, string, error) { + i := strings.IndexFunc(s, unicode.IsDigit) + if i == -1 { + return "", "", fmt.Errorf("Could not get version from string: %q", s) + } + return strings.TrimSpace(s[0:i]), s[i:], nil +} + +// getWildcardType will return the type of wildcard that the +// passed version contains +func getWildcardType(vStr string) wildcardType { + parts := strings.Split(vStr, ".") + nparts := len(parts) + wildcard := parts[nparts-1] + + possibleWildcardType := wildcardTypefromInt(nparts) + if wildcard == "x" { + return possibleWildcardType + } + + return noneWildcard +} + +// createVersionFromWildcard will convert a wildcard version +// into a regular version, replacing 'x's with '0's, handling +// special cases like '1.x.x' and '1.x' +func createVersionFromWildcard(vStr string) string { + // handle 1.x.x + vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) + vStr2 = strings.Replace(vStr2, ".x", ".0", 1) + parts := strings.Split(vStr2, ".") + + // handle 1.x + if len(parts) == 2 { + return vStr2 + ".0" + } + + return vStr2 +} + +// incrementMajorVersion will increment the major version +// of the passed version +func incrementMajorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[0]) + if err != nil { + return "", err + } + parts[0] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// incrementMajorVersion will increment the minor version +// of the passed version +func incrementMinorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[1]) + if err != nil { + return "", err + } + parts[1] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// expandWildcardVersion will expand wildcards inside versions +// following these rules: +// +// * when dealing with patch wildcards: +// >= 1.2.x will become >= 1.2.0 +// <= 1.2.x will become < 1.3.0 +// > 1.2.x will become >= 1.3.0 +// < 1.2.x will become < 1.2.0 +// != 1.2.x will become < 1.2.0 >= 
1.3.0 +// +// * when dealing with minor wildcards: +// >= 1.x will become >= 1.0.0 +// <= 1.x will become < 2.0.0 +// > 1.x will become >= 2.0.0 +// < 1.0 will become < 1.0.0 +// != 1.x will become < 1.0.0 >= 2.0.0 +// +// * when dealing with wildcards without +// version operator: +// 1.2.x will become >= 1.2.0 < 1.3.0 +// 1.x will become >= 1.0.0 < 2.0.0 +func expandWildcardVersion(parts [][]string) ([][]string, error) { + var expandedParts [][]string + for _, p := range parts { + var newParts []string + for _, ap := range p { + if strings.Contains(ap, "x") { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + + versionWildcardType := getWildcardType(vStr) + flatVersion := createVersionFromWildcard(vStr) + + var resultOperator string + var shouldIncrementVersion bool + switch opStr { + case ">": + resultOperator = ">=" + shouldIncrementVersion = true + case ">=": + resultOperator = ">=" + case "<": + resultOperator = "<" + case "<=": + resultOperator = "<" + shouldIncrementVersion = true + case "", "=", "==": + newParts = append(newParts, ">="+flatVersion) + resultOperator = "<" + shouldIncrementVersion = true + case "!=", "!": + newParts = append(newParts, "<"+flatVersion) + resultOperator = ">=" + shouldIncrementVersion = true + } + + var resultVersion string + if shouldIncrementVersion { + switch versionWildcardType { + case patchWildcard: + resultVersion, _ = incrementMinorVersion(flatVersion) + case minorWildcard: + resultVersion, _ = incrementMajorVersion(flatVersion) + } + } else { + resultVersion = flatVersion + } + + ap = resultOperator + resultVersion + } + newParts = append(newParts, ap) + } + expandedParts = append(expandedParts, newParts) + } + + return expandedParts, nil +} + +func parseComparator(s string) comparator { + switch s { + case "==": + fallthrough + case "": + fallthrough + case "=": + return compEQ + case ">": + return compGT + case ">=": + return compGE + case "<": + return compLT + case "<=": + return compLE + case "!": + fallthrough + case "!=": + return compNE + } + + return nil +} + +// MustParseRange is like ParseRange but panics if the range cannot be parsed. +func MustParseRange(s string) Range { + r, err := ParseRange(s) + if err != nil { + panic(`semver: ParseRange(` + s + `): ` + err.Error()) + } + return r +} diff --git a/openshift/tests-extension/vendor/github.com/blang/semver/v4/semver.go b/openshift/tests-extension/vendor/github.com/blang/semver/v4/semver.go new file mode 100644 index 000000000..307de610f --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/blang/semver/v4/semver.go @@ -0,0 +1,476 @@ +package semver + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + numbers string = "0123456789" + alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + alphanum = alphas + numbers +) + +// SpecVersion is the latest fully supported spec version of semver +var SpecVersion = Version{ + Major: 2, + Minor: 0, + Patch: 0, +} + +// Version represents a semver compatible version +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 + Pre []PRVersion + Build []string //No Precedence +} + +// Version to string +func (v Version) String() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + + if len(v.Pre) > 0 { + b = append(b, '-') + b = append(b, v.Pre[0].String()...) 
+ + for _, pre := range v.Pre[1:] { + b = append(b, '.') + b = append(b, pre.String()...) + } + } + + if len(v.Build) > 0 { + b = append(b, '+') + b = append(b, v.Build[0]...) + + for _, build := range v.Build[1:] { + b = append(b, '.') + b = append(b, build...) + } + } + + return string(b) +} + +// FinalizeVersion discards prerelease and build number and only returns +// major, minor and patch number. +func (v Version) FinalizeVersion() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + return string(b) +} + +// Equals checks if v is equal to o. +func (v Version) Equals(o Version) bool { + return (v.Compare(o) == 0) +} + +// EQ checks if v is equal to o. +func (v Version) EQ(o Version) bool { + return (v.Compare(o) == 0) +} + +// NE checks if v is not equal to o. +func (v Version) NE(o Version) bool { + return (v.Compare(o) != 0) +} + +// GT checks if v is greater than o. +func (v Version) GT(o Version) bool { + return (v.Compare(o) == 1) +} + +// GTE checks if v is greater than or equal to o. +func (v Version) GTE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// GE checks if v is greater than or equal to o. +func (v Version) GE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// LT checks if v is less than o. +func (v Version) LT(o Version) bool { + return (v.Compare(o) == -1) +} + +// LTE checks if v is less than or equal to o. +func (v Version) LTE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// LE checks if v is less than or equal to o. +func (v Version) LE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// Compare compares Versions v to o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v Version) Compare(o Version) int { + if v.Major != o.Major { + if v.Major > o.Major { + return 1 + } + return -1 + } + if v.Minor != o.Minor { + if v.Minor > o.Minor { + return 1 + } + return -1 + } + if v.Patch != o.Patch { + if v.Patch > o.Patch { + return 1 + } + return -1 + } + + // Quick comparison if a version has no prerelease versions + if len(v.Pre) == 0 && len(o.Pre) == 0 { + return 0 + } else if len(v.Pre) == 0 && len(o.Pre) > 0 { + return 1 + } else if len(v.Pre) > 0 && len(o.Pre) == 0 { + return -1 + } + + i := 0 + for ; i < len(v.Pre) && i < len(o.Pre); i++ { + if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { + continue + } else if comp == 1 { + return 1 + } else { + return -1 + } + } + + // If all compared prerelease identifiers are equal, the version with additional identifiers is greater + if i == len(v.Pre) && i == len(o.Pre) { + return 0 + } else if i == len(v.Pre) && i < len(o.Pre) { + return -1 + } else { + return 1 + } + +} + +// IncrementPatch increments the patch version +func (v *Version) IncrementPatch() error { + v.Patch++ + return nil +} + +// IncrementMinor increments the minor version +func (v *Version) IncrementMinor() error { + v.Minor++ + v.Patch = 0 + return nil +} + +// IncrementMajor increments the major version +func (v *Version) IncrementMajor() error { + v.Major++ + v.Minor = 0 + v.Patch = 0 + return nil +} + +// Validate validates v and returns an error in case of an invalid version +func (v Version) Validate() error { + // Major, Minor, Patch already validated using uint64 + + for _, pre := range v.Pre { + if !pre.IsNum { // Numeric prerelease versions are already validated as uint64 + if len(pre.VersionStr) == 0 { + return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) + } + if 
!containsOnly(pre.VersionStr, alphanum) { + return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) + } + } + } + + for _, build := range v.Build { + if len(build) == 0 { + return fmt.Errorf("Build meta data can not be empty %q", build) + } + if !containsOnly(build, alphanum) { + return fmt.Errorf("Invalid character(s) found in build meta data %q", build) + } + } + + return nil +} + +// New is an alias for Parse that returns a pointer: it parses a version string and returns a validated Version or an error +func New(s string) (*Version, error) { + v, err := Parse(s) + vp := &v + return vp, err +} + +// Make is an alias for Parse: it parses a version string and returns a validated Version or an error +func Make(s string) (Version, error) { + return Parse(s) +} + +// ParseTolerant allows for certain version specifications that do not strictly adhere to semver +// specs to be parsed by this library. It does so by normalizing versions before passing them to +// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions +// with only major and minor components specified, and removes leading 0s. +func ParseTolerant(s string) (Version, error) { + s = strings.TrimSpace(s) + s = strings.TrimPrefix(s, "v") + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + // Remove leading zeros. + for i, p := range parts { + if len(p) > 1 { + p = strings.TrimLeft(p, "0") + if len(p) == 0 || !strings.ContainsAny(p[0:1], "0123456789") { + p = "0" + p + } + parts[i] = p + } + } + // Fill up shortened versions. + if len(parts) < 3 { + if strings.ContainsAny(parts[len(parts)-1], "+-") { + return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") + } + for len(parts) < 3 { + parts = append(parts, "0") + } + } + s = strings.Join(parts, ".") + + return Parse(s) +} + +// Parse parses a version string and returns a validated Version or an error +func Parse(s string) (Version, error) { + if len(s) == 0 { + return Version{}, errors.New("Version string empty") + } + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) != 3 { + return Version{}, errors.New("No Major.Minor.Patch elements found") + } + + // Major + if !containsOnly(parts[0], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) + } + if hasLeadingZeroes(parts[0]) { + return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) + } + major, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return Version{}, err + } + + // Minor + if !containsOnly(parts[1], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) + } + if hasLeadingZeroes(parts[1]) { + return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) + } + minor, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return Version{}, err + } + + v := Version{} + v.Major = major + v.Minor = minor + + var build, prerelease []string + patchStr := parts[2] + + if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { + build = strings.Split(patchStr[buildIndex+1:], ".") + patchStr = patchStr[:buildIndex] + } + + if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { + prerelease = strings.Split(patchStr[preIndex+1:], ".") + patchStr = patchStr[:preIndex] + } + + if !containsOnly(patchStr, numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in patch number 
%q", patchStr) + } + if hasLeadingZeroes(patchStr) { + return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) + } + patch, err := strconv.ParseUint(patchStr, 10, 64) + if err != nil { + return Version{}, err + } + + v.Patch = patch + + // Prerelease + for _, prstr := range prerelease { + parsedPR, err := NewPRVersion(prstr) + if err != nil { + return Version{}, err + } + v.Pre = append(v.Pre, parsedPR) + } + + // Build meta data + for _, str := range build { + if len(str) == 0 { + return Version{}, errors.New("Build meta data is empty") + } + if !containsOnly(str, alphanum) { + return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) + } + v.Build = append(v.Build, str) + } + + return v, nil +} + +// MustParse is like Parse but panics if the version cannot be parsed. +func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} + +// FinalizeVersion returns the major, minor and patch number only and discards +// prerelease and build number. 
+func FinalizeVersion(s string) (string, error) { + v, err := Parse(s) + if err != nil { + return "", err + } + v.Pre = nil + v.Build = nil + + finalVer := v.String() + return finalVer, nil +} diff --git a/openshift/tests-extension/vendor/github.com/blang/semver/v4/sort.go b/openshift/tests-extension/vendor/github.com/blang/semver/v4/sort.go new file mode 100644 index 000000000..e18f88082 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/blang/semver/v4/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. +type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by their indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/openshift/tests-extension/vendor/github.com/blang/semver/v4/sql.go b/openshift/tests-extension/vendor/github.com/blang/semver/v4/sql.go new file mode 100644 index 000000000..db958134f --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/blang/semver/v4/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("version.Scan: cannot convert %T to string", src) + } + + // Assign to the named return err rather than shadowing it, so that a + // parse failure is reported to the caller. + t, err := Parse(str) + if err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface.
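+// A minimal round-trip sketch (assuming the column value is scanned as a string): +// +// var v Version +// _ = v.Scan("1.2.3") // v now holds 1.2.3 +// val, _ := v.Value() // val is the string "1.2.3"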
+func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/consts.go b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/consts.go new file mode 100644 index 000000000..0d9c8f03b --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/consts.go @@ -0,0 +1,202 @@ +package v1 + +// annotations +const ( + // BuildAnnotation is an annotation that identifies a Pod as being for a Build + BuildAnnotation = "openshift.io/build.name" + + // BuildConfigAnnotation is an annotation that identifies the BuildConfig that a Build was created from + BuildConfigAnnotation = "openshift.io/build-config.name" + + // BuildCloneAnnotation is an annotation whose value is the name of the build this build was cloned from + BuildCloneAnnotation = "openshift.io/build.clone-of" + + // BuildNumberAnnotation is an annotation whose value is the sequential number for this Build + BuildNumberAnnotation = "openshift.io/build.number" + + // BuildPodNameAnnotation is an annotation whose value is the name of the pod running this build + BuildPodNameAnnotation = "openshift.io/build.pod-name" + + // BuildJenkinsStatusJSONAnnotation is an annotation holding the Jenkins status information + BuildJenkinsStatusJSONAnnotation = "openshift.io/jenkins-status-json" + + // BuildJenkinsLogURLAnnotation is an annotation holding a link to the raw Jenkins build console log + BuildJenkinsLogURLAnnotation = "openshift.io/jenkins-log-url" + + // BuildJenkinsConsoleLogURLAnnotation is an annotation holding a link to the Jenkins build console log (including Jenkins chrome wrapping) + BuildJenkinsConsoleLogURLAnnotation = "openshift.io/jenkins-console-log-url" + + // BuildJenkinsBlueOceanLogURLAnnotation is an annotation holding a link to the Jenkins build console log via the Jenkins BlueOcean UI Plugin + BuildJenkinsBlueOceanLogURLAnnotation = "openshift.io/jenkins-blueocean-log-url" + + // BuildJenkinsBuildURIAnnotation is an annotation holding a link to the Jenkins build + BuildJenkinsBuildURIAnnotation = "openshift.io/jenkins-build-uri" + + // BuildSourceSecretMatchURIAnnotationPrefix is a prefix for annotations on a Secret which indicate a source URI against which the Secret can be used + BuildSourceSecretMatchURIAnnotationPrefix = "build.openshift.io/source-secret-match-uri-" + + // BuildConfigPausedAnnotation is an annotation that marks a BuildConfig as paused. + // New Builds cannot be instantiated from a paused BuildConfig. + BuildConfigPausedAnnotation = "openshift.io/build-config.paused" +) + +// labels +const ( + // BuildConfigLabel is the key of a Build label whose value is the ID of a BuildConfig + // on which the Build is based. NOTE: The value for this label may not contain the entire + // BuildConfig name because it will be truncated to maximum label length. + BuildConfigLabel = "openshift.io/build-config.name" + + // BuildLabel is the key of a Pod label whose value is the Name of a Build which is run. + // NOTE: The value for this label may not contain the entire Build name because it will be + // truncated to maximum label length. + BuildLabel = "openshift.io/build.name" + + // BuildRunPolicyLabel represents the start policy used to start the build. + BuildRunPolicyLabel = "openshift.io/build.start-policy" + + // BuildConfigLabelDeprecated was used as BuildConfigLabel before adding namespaces. + // We keep it for backward compatibility.
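+ // For example, builds created from a BuildConfig named "myapp" (a hypothetical name) can be + // listed with a label selector on either key: "oc get builds -l openshift.io/build-config.name=myapp", + // or, on clusters that still apply the deprecated label, "oc get builds -l buildconfig=myapp".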
+ BuildConfigLabelDeprecated = "buildconfig" +) + +const ( + // StatusReasonError is a generic reason for a build error condition. + StatusReasonError StatusReason = "Error" + + // StatusReasonCannotCreateBuildPodSpec is an error condition when the build + // strategy cannot create a build pod spec. + StatusReasonCannotCreateBuildPodSpec StatusReason = "CannotCreateBuildPodSpec" + + // StatusReasonCannotCreateBuildPod is an error condition when a build pod + // cannot be created. + StatusReasonCannotCreateBuildPod StatusReason = "CannotCreateBuildPod" + + // StatusReasonInvalidOutputReference is an error condition when the build + // output is an invalid reference. + StatusReasonInvalidOutputReference StatusReason = "InvalidOutputReference" + + // StatusReasonInvalidImageReference is an error condition when the build + // references an invalid image. + StatusReasonInvalidImageReference StatusReason = "InvalidImageReference" + + // StatusReasonCancelBuildFailed is an error condition when cancelling a build + // fails. + StatusReasonCancelBuildFailed StatusReason = "CancelBuildFailed" + + // StatusReasonBuildPodDeleted is an error condition when the build pod is + // deleted before build completion. + StatusReasonBuildPodDeleted StatusReason = "BuildPodDeleted" + + // StatusReasonExceededRetryTimeout is an error condition when the build has + // not completed and retrying the build times out. + StatusReasonExceededRetryTimeout StatusReason = "ExceededRetryTimeout" + + // StatusReasonMissingPushSecret indicates that the build is missing required + // secret for pushing the output image. + // The build will stay in the pending state until the secret is created, or the build times out. + StatusReasonMissingPushSecret StatusReason = "MissingPushSecret" + + // StatusReasonPostCommitHookFailed indicates the post-commit hook failed. + StatusReasonPostCommitHookFailed StatusReason = "PostCommitHookFailed" + + // StatusReasonPushImageToRegistryFailed indicates that an image failed to be + // pushed to the registry. + StatusReasonPushImageToRegistryFailed StatusReason = "PushImageToRegistryFailed" + + // StatusReasonPullBuilderImageFailed indicates that we failed to pull the + // builder image. + StatusReasonPullBuilderImageFailed StatusReason = "PullBuilderImageFailed" + + // StatusReasonFetchSourceFailed indicates that fetching the source of the + // build has failed. + StatusReasonFetchSourceFailed StatusReason = "FetchSourceFailed" + + // StatusReasonFetchImageContentFailed indicates that the fetching of an image and extracting + // its contents for inclusion in the build has failed. + StatusReasonFetchImageContentFailed StatusReason = "FetchImageContentFailed" + + // StatusReasonManageDockerfileFailed indicates that the set up of the Dockerfile for the build + // has failed. + StatusReasonManageDockerfileFailed StatusReason = "ManageDockerfileFailed" + + // StatusReasonInvalidContextDirectory indicates that the supplied + // contextDir does not exist + StatusReasonInvalidContextDirectory StatusReason = "InvalidContextDirectory" + + // StatusReasonCancelledBuild indicates that the build was cancelled by the + // user. + StatusReasonCancelledBuild StatusReason = "CancelledBuild" + + // StatusReasonDockerBuildFailed indicates that the container image build strategy has + // failed. + StatusReasonDockerBuildFailed StatusReason = "DockerBuildFailed" + + // StatusReasonBuildPodExists indicates that the build tried to create a + // build pod but one was already present. 
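+ // (for example, when a pod left over from a previous attempt of the same build has not yet been deleted)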
+ StatusReasonBuildPodExists StatusReason = "BuildPodExists" + + // StatusReasonNoBuildContainerStatus indicates that the build failed because the + // build pod has no container statuses. + StatusReasonNoBuildContainerStatus StatusReason = "NoBuildContainerStatus" + + // StatusReasonFailedContainer indicates that the pod for the build has at least + // one container with a non-zero exit status. + StatusReasonFailedContainer StatusReason = "FailedContainer" + + // StatusReasonUnresolvableEnvironmentVariable indicates that an error occurred processing + // the supplied options for environment variables in the build strategy environment + StatusReasonUnresolvableEnvironmentVariable StatusReason = "UnresolvableEnvironmentVariable" + + // StatusReasonGenericBuildFailed is the reason associated with a broad + // range of build failures. + StatusReasonGenericBuildFailed StatusReason = "GenericBuildFailed" + + // StatusReasonOutOfMemoryKilled indicates that the build pod was killed for its memory consumption + StatusReasonOutOfMemoryKilled StatusReason = "OutOfMemoryKilled" + + // StatusReasonCannotRetrieveServiceAccount is the reason associated with a failure + // to look up the service account associated with the BuildConfig. + StatusReasonCannotRetrieveServiceAccount StatusReason = "CannotRetrieveServiceAccount" + + // StatusReasonBuildPodEvicted is the reason a build fails due to the build pod being evicted + // from its node + StatusReasonBuildPodEvicted StatusReason = "BuildPodEvicted" +) + +// WhitelistEnvVarNames is a list of environment variable names that are allowed to be specified +// in a buildconfig and merged into the created build pods; the code for this is located in +// openshift/openshift-controller-manager +var WhitelistEnvVarNames = []string{"BUILD_LOGLEVEL", "GIT_SSL_NO_VERIFY", "GIT_LFS_SKIP_SMUDGE", "LANG", + "HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY", "http_proxy", "https_proxy", "no_proxy"} + +// env vars +const ( + + // CustomBuildStrategyBaseImageKey is the environment variable that indicates the base image to be used when + // performing a custom build, if needed. + CustomBuildStrategyBaseImageKey = "OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE" + + // AllowedUIDs is an environment variable that contains ranges of UIDs that are allowed in + // Source builder images + AllowedUIDs = "ALLOWED_UIDS" + // DropCapabilities is an environment variable that contains a list of capabilities to drop when + // executing a Source build + DropCapabilities = "DROP_CAPS" +) + +// keys inside secrets and configmaps +const ( + // WebHookSecretKey is the key used to identify the value containing the webhook invocation + // secret within a secret referenced by a webhook trigger. + WebHookSecretKey = "WebHookSecretKey" + + // RegistryConfKey is the ConfigMap key for the build pod's registry configuration file. + RegistryConfKey = "registries.conf" + + // SignaturePolicyKey is the ConfigMap key for the build pod's image signature policy file. + SignaturePolicyKey = "policy.json" + + // ServiceCAKey is the ConfigMap key for the service signing certificate authority mounted into build pods.
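+ // (build pods typically use this CA to trust internally signed services, such as the integrated image registry)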
+ ServiceCAKey = "service-ca.crt" +) diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/doc.go b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/doc.go new file mode 100644 index 000000000..9bc16f64b --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/build/apis/build +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=build.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/generated.pb.go b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/generated.pb.go new file mode 100644 index 000000000..1b026f354 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/generated.pb.go @@ -0,0 +1,17545 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/build/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *BinaryBuildRequestOptions) Reset() { *m = BinaryBuildRequestOptions{} } +func (*BinaryBuildRequestOptions) ProtoMessage() {} +func (*BinaryBuildRequestOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{0} +} +func (m *BinaryBuildRequestOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BinaryBuildRequestOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BinaryBuildRequestOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinaryBuildRequestOptions.Merge(m, src) +} +func (m *BinaryBuildRequestOptions) XXX_Size() int { + return m.Size() +} +func (m *BinaryBuildRequestOptions) XXX_DiscardUnknown() { + xxx_messageInfo_BinaryBuildRequestOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_BinaryBuildRequestOptions proto.InternalMessageInfo + +func (m *BinaryBuildSource) Reset() { *m = BinaryBuildSource{} } +func (*BinaryBuildSource) ProtoMessage() {} +func (*BinaryBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{1} +} +func (m *BinaryBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BinaryBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BinaryBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinaryBuildSource.Merge(m, src) +} +func (m *BinaryBuildSource) XXX_Size() int { + return m.Size() +} +func (m *BinaryBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_BinaryBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BinaryBuildSource proto.InternalMessageInfo + +func (m *BitbucketWebHookCause) Reset() { *m = BitbucketWebHookCause{} } +func (*BitbucketWebHookCause) ProtoMessage() {} +func (*BitbucketWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{2} +} +func (m *BitbucketWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BitbucketWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BitbucketWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_BitbucketWebHookCause.Merge(m, src) +} +func (m *BitbucketWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *BitbucketWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_BitbucketWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_BitbucketWebHookCause proto.InternalMessageInfo + +func (m *Build) Reset() { *m = Build{} } +func (*Build) ProtoMessage() {} +func (*Build) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{3} +} +func (m *Build) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Build) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Build) XXX_Merge(src proto.Message) { + xxx_messageInfo_Build.Merge(m, src) +} +func (m *Build) XXX_Size() int { + return m.Size() +} +func (m *Build) XXX_DiscardUnknown() { + xxx_messageInfo_Build.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Build proto.InternalMessageInfo + +func (m *BuildCondition) Reset() { *m = BuildCondition{} } +func (*BuildCondition) ProtoMessage() {} +func (*BuildCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{4} +} +func (m *BuildCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildCondition.Merge(m, src) +} +func (m *BuildCondition) XXX_Size() int { + return m.Size() +} +func (m *BuildCondition) XXX_DiscardUnknown() { + xxx_messageInfo_BuildCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildCondition proto.InternalMessageInfo + +func (m *BuildConfig) Reset() { *m = BuildConfig{} } +func (*BuildConfig) ProtoMessage() {} +func (*BuildConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{5} +} +func (m *BuildConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfig.Merge(m, src) +} +func (m *BuildConfig) XXX_Size() int { + return m.Size() +} +func (m *BuildConfig) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfig proto.InternalMessageInfo + +func (m *BuildConfigList) Reset() { *m = BuildConfigList{} } +func (*BuildConfigList) ProtoMessage() {} +func (*BuildConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{6} +} +func (m *BuildConfigList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigList.Merge(m, src) +} +func (m *BuildConfigList) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigList proto.InternalMessageInfo + +func (m *BuildConfigSpec) Reset() { *m = BuildConfigSpec{} } +func (*BuildConfigSpec) ProtoMessage() {} +func (*BuildConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{7} +} +func (m *BuildConfigSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigSpec.Merge(m, src) +} +func (m *BuildConfigSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigSpec proto.InternalMessageInfo + +func (m *BuildConfigStatus) Reset() { *m = BuildConfigStatus{} } +func (*BuildConfigStatus) ProtoMessage() {} +func (*BuildConfigStatus) Descriptor() 
([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{8} +} +func (m *BuildConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigStatus.Merge(m, src) +} +func (m *BuildConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigStatus proto.InternalMessageInfo + +func (m *BuildList) Reset() { *m = BuildList{} } +func (*BuildList) ProtoMessage() {} +func (*BuildList) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{9} +} +func (m *BuildList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildList.Merge(m, src) +} +func (m *BuildList) XXX_Size() int { + return m.Size() +} +func (m *BuildList) XXX_DiscardUnknown() { + xxx_messageInfo_BuildList.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildList proto.InternalMessageInfo + +func (m *BuildLog) Reset() { *m = BuildLog{} } +func (*BuildLog) ProtoMessage() {} +func (*BuildLog) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{10} +} +func (m *BuildLog) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildLog.Merge(m, src) +} +func (m *BuildLog) XXX_Size() int { + return m.Size() +} +func (m *BuildLog) XXX_DiscardUnknown() { + xxx_messageInfo_BuildLog.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildLog proto.InternalMessageInfo + +func (m *BuildLogOptions) Reset() { *m = BuildLogOptions{} } +func (*BuildLogOptions) ProtoMessage() {} +func (*BuildLogOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{11} +} +func (m *BuildLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildLogOptions.Merge(m, src) +} +func (m *BuildLogOptions) XXX_Size() int { + return m.Size() +} +func (m *BuildLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_BuildLogOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildLogOptions proto.InternalMessageInfo + +func (m *BuildOutput) Reset() { *m = BuildOutput{} } +func (*BuildOutput) ProtoMessage() {} +func (*BuildOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{12} +} +func (m *BuildOutput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildOutput.Merge(m, src) +} +func (m *BuildOutput) XXX_Size() int { + return m.Size() +} +func (m *BuildOutput) XXX_DiscardUnknown() { + xxx_messageInfo_BuildOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildOutput proto.InternalMessageInfo + +func (m *BuildPostCommitSpec) Reset() { *m = BuildPostCommitSpec{} } +func (*BuildPostCommitSpec) ProtoMessage() {} +func (*BuildPostCommitSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{13} +} +func (m *BuildPostCommitSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildPostCommitSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildPostCommitSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildPostCommitSpec.Merge(m, src) +} +func (m *BuildPostCommitSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildPostCommitSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BuildPostCommitSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildPostCommitSpec proto.InternalMessageInfo + +func (m *BuildRequest) Reset() { *m = BuildRequest{} } +func (*BuildRequest) ProtoMessage() {} +func (*BuildRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{14} +} +func (m *BuildRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildRequest.Merge(m, src) +} +func (m *BuildRequest) XXX_Size() int { + return m.Size() +} +func (m *BuildRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BuildRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildRequest proto.InternalMessageInfo + +func (m *BuildSource) Reset() { *m = BuildSource{} } +func (*BuildSource) ProtoMessage() {} +func (*BuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{15} +} +func (m *BuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildSource.Merge(m, src) +} +func (m *BuildSource) XXX_Size() int { + return m.Size() +} +func (m *BuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_BuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildSource proto.InternalMessageInfo + +func (m *BuildSpec) Reset() { *m = BuildSpec{} } +func (*BuildSpec) ProtoMessage() {} +func (*BuildSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{16} +} +func (m *BuildSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildSpec.Merge(m, src) +} +func (m *BuildSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildSpec) 
XXX_DiscardUnknown() { + xxx_messageInfo_BuildSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildSpec proto.InternalMessageInfo + +func (m *BuildStatus) Reset() { *m = BuildStatus{} } +func (*BuildStatus) ProtoMessage() {} +func (*BuildStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{17} +} +func (m *BuildStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatus.Merge(m, src) +} +func (m *BuildStatus) XXX_Size() int { + return m.Size() +} +func (m *BuildStatus) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatus proto.InternalMessageInfo + +func (m *BuildStatusOutput) Reset() { *m = BuildStatusOutput{} } +func (*BuildStatusOutput) ProtoMessage() {} +func (*BuildStatusOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{18} +} +func (m *BuildStatusOutput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatusOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatusOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatusOutput.Merge(m, src) +} +func (m *BuildStatusOutput) XXX_Size() int { + return m.Size() +} +func (m *BuildStatusOutput) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatusOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatusOutput proto.InternalMessageInfo + +func (m *BuildStatusOutputTo) Reset() { *m = BuildStatusOutputTo{} } +func (*BuildStatusOutputTo) ProtoMessage() {} +func (*BuildStatusOutputTo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{19} +} +func (m *BuildStatusOutputTo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatusOutputTo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatusOutputTo) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatusOutputTo.Merge(m, src) +} +func (m *BuildStatusOutputTo) XXX_Size() int { + return m.Size() +} +func (m *BuildStatusOutputTo) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatusOutputTo.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatusOutputTo proto.InternalMessageInfo + +func (m *BuildStrategy) Reset() { *m = BuildStrategy{} } +func (*BuildStrategy) ProtoMessage() {} +func (*BuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{20} +} +func (m *BuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStrategy.Merge(m, src) +} +func (m *BuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *BuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStrategy proto.InternalMessageInfo + +func (m 
*BuildTriggerCause) Reset() { *m = BuildTriggerCause{} } +func (*BuildTriggerCause) ProtoMessage() {} +func (*BuildTriggerCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{21} +} +func (m *BuildTriggerCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildTriggerCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildTriggerCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildTriggerCause.Merge(m, src) +} +func (m *BuildTriggerCause) XXX_Size() int { + return m.Size() +} +func (m *BuildTriggerCause) XXX_DiscardUnknown() { + xxx_messageInfo_BuildTriggerCause.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildTriggerCause proto.InternalMessageInfo + +func (m *BuildTriggerPolicy) Reset() { *m = BuildTriggerPolicy{} } +func (*BuildTriggerPolicy) ProtoMessage() {} +func (*BuildTriggerPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{22} +} +func (m *BuildTriggerPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildTriggerPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildTriggerPolicy.Merge(m, src) +} +func (m *BuildTriggerPolicy) XXX_Size() int { + return m.Size() +} +func (m *BuildTriggerPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_BuildTriggerPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildTriggerPolicy proto.InternalMessageInfo + +func (m *BuildVolume) Reset() { *m = BuildVolume{} } +func (*BuildVolume) ProtoMessage() {} +func (*BuildVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{23} +} +func (m *BuildVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildVolume.Merge(m, src) +} +func (m *BuildVolume) XXX_Size() int { + return m.Size() +} +func (m *BuildVolume) XXX_DiscardUnknown() { + xxx_messageInfo_BuildVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildVolume proto.InternalMessageInfo + +func (m *BuildVolumeMount) Reset() { *m = BuildVolumeMount{} } +func (*BuildVolumeMount) ProtoMessage() {} +func (*BuildVolumeMount) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{24} +} +func (m *BuildVolumeMount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildVolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildVolumeMount) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildVolumeMount.Merge(m, src) +} +func (m *BuildVolumeMount) XXX_Size() int { + return m.Size() +} +func (m *BuildVolumeMount) XXX_DiscardUnknown() { + xxx_messageInfo_BuildVolumeMount.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildVolumeMount proto.InternalMessageInfo + +func (m *BuildVolumeSource) Reset() { *m = BuildVolumeSource{} } +func (*BuildVolumeSource) ProtoMessage() {} +func 
(*BuildVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{25} +} +func (m *BuildVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildVolumeSource.Merge(m, src) +} +func (m *BuildVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *BuildVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_BuildVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildVolumeSource proto.InternalMessageInfo + +func (m *CommonSpec) Reset() { *m = CommonSpec{} } +func (*CommonSpec) ProtoMessage() {} +func (*CommonSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{26} +} +func (m *CommonSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommonSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CommonSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommonSpec.Merge(m, src) +} +func (m *CommonSpec) XXX_Size() int { + return m.Size() +} +func (m *CommonSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CommonSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CommonSpec proto.InternalMessageInfo + +func (m *CommonWebHookCause) Reset() { *m = CommonWebHookCause{} } +func (*CommonWebHookCause) ProtoMessage() {} +func (*CommonWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{27} +} +func (m *CommonWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommonWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CommonWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommonWebHookCause.Merge(m, src) +} +func (m *CommonWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *CommonWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_CommonWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_CommonWebHookCause proto.InternalMessageInfo + +func (m *ConfigMapBuildSource) Reset() { *m = ConfigMapBuildSource{} } +func (*ConfigMapBuildSource) ProtoMessage() {} +func (*ConfigMapBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{28} +} +func (m *ConfigMapBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapBuildSource.Merge(m, src) +} +func (m *ConfigMapBuildSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigMapBuildSource proto.InternalMessageInfo + +func (m *CustomBuildStrategy) Reset() { *m = CustomBuildStrategy{} } +func (*CustomBuildStrategy) ProtoMessage() {} +func (*CustomBuildStrategy) Descriptor() ([]byte, []int) { + return 
fileDescriptor_2ba579f6f004cb75, []int{29} +} +func (m *CustomBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomBuildStrategy.Merge(m, src) +} +func (m *CustomBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *CustomBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_CustomBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomBuildStrategy proto.InternalMessageInfo + +func (m *DockerBuildStrategy) Reset() { *m = DockerBuildStrategy{} } +func (*DockerBuildStrategy) ProtoMessage() {} +func (*DockerBuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{30} +} +func (m *DockerBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerBuildStrategy.Merge(m, src) +} +func (m *DockerBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *DockerBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DockerBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerBuildStrategy proto.InternalMessageInfo + +func (m *DockerStrategyOptions) Reset() { *m = DockerStrategyOptions{} } +func (*DockerStrategyOptions) ProtoMessage() {} +func (*DockerStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{31} +} +func (m *DockerStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerStrategyOptions.Merge(m, src) +} +func (m *DockerStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *DockerStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DockerStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerStrategyOptions proto.InternalMessageInfo + +func (m *GenericWebHookCause) Reset() { *m = GenericWebHookCause{} } +func (*GenericWebHookCause) ProtoMessage() {} +func (*GenericWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{32} +} +func (m *GenericWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenericWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GenericWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenericWebHookCause.Merge(m, src) +} +func (m *GenericWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *GenericWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_GenericWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_GenericWebHookCause proto.InternalMessageInfo + +func (m *GenericWebHookEvent) Reset() { *m = GenericWebHookEvent{} } +func (*GenericWebHookEvent) 
ProtoMessage() {} +func (*GenericWebHookEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{33} +} +func (m *GenericWebHookEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenericWebHookEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GenericWebHookEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenericWebHookEvent.Merge(m, src) +} +func (m *GenericWebHookEvent) XXX_Size() int { + return m.Size() +} +func (m *GenericWebHookEvent) XXX_DiscardUnknown() { + xxx_messageInfo_GenericWebHookEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_GenericWebHookEvent proto.InternalMessageInfo + +func (m *GitBuildSource) Reset() { *m = GitBuildSource{} } +func (*GitBuildSource) ProtoMessage() {} +func (*GitBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{34} +} +func (m *GitBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitBuildSource.Merge(m, src) +} +func (m *GitBuildSource) XXX_Size() int { + return m.Size() +} +func (m *GitBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_GitBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_GitBuildSource proto.InternalMessageInfo + +func (m *GitHubWebHookCause) Reset() { *m = GitHubWebHookCause{} } +func (*GitHubWebHookCause) ProtoMessage() {} +func (*GitHubWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{35} +} +func (m *GitHubWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitHubWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitHubWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitHubWebHookCause.Merge(m, src) +} +func (m *GitHubWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *GitHubWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_GitHubWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_GitHubWebHookCause proto.InternalMessageInfo + +func (m *GitInfo) Reset() { *m = GitInfo{} } +func (*GitInfo) ProtoMessage() {} +func (*GitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{36} +} +func (m *GitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitInfo.Merge(m, src) +} +func (m *GitInfo) XXX_Size() int { + return m.Size() +} +func (m *GitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GitInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GitInfo proto.InternalMessageInfo + +func (m *GitLabWebHookCause) Reset() { *m = GitLabWebHookCause{} } +func (*GitLabWebHookCause) ProtoMessage() {} +func (*GitLabWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{37} +} +func (m *GitLabWebHookCause) 
XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GitLabWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GitLabWebHookCause) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GitLabWebHookCause.Merge(m, src)
+}
+func (m *GitLabWebHookCause) XXX_Size() int {
+	return m.Size()
+}
+func (m *GitLabWebHookCause) XXX_DiscardUnknown() {
+	xxx_messageInfo_GitLabWebHookCause.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GitLabWebHookCause proto.InternalMessageInfo
+
+func (m *GitRefInfo) Reset() { *m = GitRefInfo{} }
+func (*GitRefInfo) ProtoMessage() {}
+func (*GitRefInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{38}
+}
+func (m *GitRefInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GitRefInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GitRefInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GitRefInfo.Merge(m, src)
+}
+func (m *GitRefInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *GitRefInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_GitRefInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GitRefInfo proto.InternalMessageInfo
+
+func (m *GitSourceRevision) Reset() { *m = GitSourceRevision{} }
+func (*GitSourceRevision) ProtoMessage() {}
+func (*GitSourceRevision) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{39}
+}
+func (m *GitSourceRevision) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GitSourceRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GitSourceRevision) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GitSourceRevision.Merge(m, src)
+}
+func (m *GitSourceRevision) XXX_Size() int {
+	return m.Size()
+}
+func (m *GitSourceRevision) XXX_DiscardUnknown() {
+	xxx_messageInfo_GitSourceRevision.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GitSourceRevision proto.InternalMessageInfo
+
+func (m *ImageChangeCause) Reset() { *m = ImageChangeCause{} }
+func (*ImageChangeCause) ProtoMessage() {}
+func (*ImageChangeCause) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{40}
+}
+func (m *ImageChangeCause) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageChangeCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageChangeCause) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageChangeCause.Merge(m, src)
+}
+func (m *ImageChangeCause) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageChangeCause) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageChangeCause.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageChangeCause proto.InternalMessageInfo
+
+func (m *ImageChangeTrigger) Reset() { *m = ImageChangeTrigger{} }
+func (*ImageChangeTrigger) ProtoMessage() {}
+func (*ImageChangeTrigger) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{41}
+}
+func (m *ImageChangeTrigger) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageChangeTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageChangeTrigger) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageChangeTrigger.Merge(m, src)
+}
+func (m *ImageChangeTrigger) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageChangeTrigger) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageChangeTrigger.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageChangeTrigger proto.InternalMessageInfo
+
+func (m *ImageChangeTriggerStatus) Reset() { *m = ImageChangeTriggerStatus{} }
+func (*ImageChangeTriggerStatus) ProtoMessage() {}
+func (*ImageChangeTriggerStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{42}
+}
+func (m *ImageChangeTriggerStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageChangeTriggerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageChangeTriggerStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageChangeTriggerStatus.Merge(m, src)
+}
+func (m *ImageChangeTriggerStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageChangeTriggerStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageChangeTriggerStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageChangeTriggerStatus proto.InternalMessageInfo
+
+func (m *ImageLabel) Reset() { *m = ImageLabel{} }
+func (*ImageLabel) ProtoMessage() {}
+func (*ImageLabel) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{43}
+}
+func (m *ImageLabel) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageLabel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageLabel) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageLabel.Merge(m, src)
+}
+func (m *ImageLabel) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageLabel) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageLabel.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageLabel proto.InternalMessageInfo
+
+func (m *ImageSource) Reset() { *m = ImageSource{} }
+func (*ImageSource) ProtoMessage() {}
+func (*ImageSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{44}
+}
+func (m *ImageSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageSource.Merge(m, src)
+}
+func (m *ImageSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageSource proto.InternalMessageInfo
+
+func (m *ImageSourcePath) Reset() { *m = ImageSourcePath{} }
+func (*ImageSourcePath) ProtoMessage() {}
+func (*ImageSourcePath) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{45}
+}
+func (m *ImageSourcePath) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageSourcePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageSourcePath) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageSourcePath.Merge(m, src)
+}
+func (m *ImageSourcePath) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageSourcePath) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageSourcePath.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageSourcePath proto.InternalMessageInfo
+
+func (m *ImageStreamTagReference) Reset() { *m = ImageStreamTagReference{} }
+func (*ImageStreamTagReference) ProtoMessage() {}
+func (*ImageStreamTagReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{46}
+}
+func (m *ImageStreamTagReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageStreamTagReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageStreamTagReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageStreamTagReference.Merge(m, src)
+}
+func (m *ImageStreamTagReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageStreamTagReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageStreamTagReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamTagReference proto.InternalMessageInfo
+
+func (m *JenkinsPipelineBuildStrategy) Reset() { *m = JenkinsPipelineBuildStrategy{} }
+func (*JenkinsPipelineBuildStrategy) ProtoMessage() {}
+func (*JenkinsPipelineBuildStrategy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{47}
+}
+func (m *JenkinsPipelineBuildStrategy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *JenkinsPipelineBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *JenkinsPipelineBuildStrategy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_JenkinsPipelineBuildStrategy.Merge(m, src)
+}
+func (m *JenkinsPipelineBuildStrategy) XXX_Size() int {
+	return m.Size()
+}
+func (m *JenkinsPipelineBuildStrategy) XXX_DiscardUnknown() {
+	xxx_messageInfo_JenkinsPipelineBuildStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_JenkinsPipelineBuildStrategy proto.InternalMessageInfo
+
+func (m *OptionalNodeSelector) Reset() { *m = OptionalNodeSelector{} }
+func (*OptionalNodeSelector) ProtoMessage() {}
+func (*OptionalNodeSelector) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{48}
+}
+func (m *OptionalNodeSelector) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *OptionalNodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *OptionalNodeSelector) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OptionalNodeSelector.Merge(m, src)
+}
+func (m *OptionalNodeSelector) XXX_Size() int {
+	return m.Size()
+}
+func (m *OptionalNodeSelector) XXX_DiscardUnknown() {
+	xxx_messageInfo_OptionalNodeSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OptionalNodeSelector proto.InternalMessageInfo
+
+func (m *ProxyConfig) Reset() { *m = ProxyConfig{} }
+func (*ProxyConfig) ProtoMessage() {}
+func (*ProxyConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{49}
+}
+func (m *ProxyConfig) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ProxyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ProxyConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProxyConfig.Merge(m, src)
+}
+func (m *ProxyConfig) XXX_Size() int {
+	return m.Size()
+}
+func (m *ProxyConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProxyConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProxyConfig proto.InternalMessageInfo
+
+func (m *SecretBuildSource) Reset() { *m = SecretBuildSource{} }
+func (*SecretBuildSource) ProtoMessage() {}
+func (*SecretBuildSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{50}
+}
+func (m *SecretBuildSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretBuildSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretBuildSource.Merge(m, src)
+}
+func (m *SecretBuildSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretBuildSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretBuildSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretBuildSource proto.InternalMessageInfo
+
+func (m *SecretLocalReference) Reset() { *m = SecretLocalReference{} }
+func (*SecretLocalReference) ProtoMessage() {}
+func (*SecretLocalReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{51}
+}
+func (m *SecretLocalReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretLocalReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretLocalReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretLocalReference.Merge(m, src)
+}
+func (m *SecretLocalReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretLocalReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretLocalReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretLocalReference proto.InternalMessageInfo
+
+func (m *SecretSpec) Reset() { *m = SecretSpec{} }
+func (*SecretSpec) ProtoMessage() {}
+func (*SecretSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{52}
+}
+func (m *SecretSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretSpec.Merge(m, src)
+}
+func (m *SecretSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretSpec proto.InternalMessageInfo
+
+func (m *SourceBuildStrategy) Reset() { *m = SourceBuildStrategy{} }
+func (*SourceBuildStrategy) ProtoMessage() {}
+func (*SourceBuildStrategy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{53}
+}
+func (m *SourceBuildStrategy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SourceBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceBuildStrategy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceBuildStrategy.Merge(m, src)
+}
+func (m *SourceBuildStrategy) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceBuildStrategy) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceBuildStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceBuildStrategy proto.InternalMessageInfo
+
+func (m *SourceControlUser) Reset() { *m = SourceControlUser{} }
+func (*SourceControlUser) ProtoMessage() {}
+func (*SourceControlUser) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{54}
+}
+func (m *SourceControlUser) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SourceControlUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceControlUser) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceControlUser.Merge(m, src)
+}
+func (m *SourceControlUser) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceControlUser) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceControlUser.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceControlUser proto.InternalMessageInfo
+
+func (m *SourceRevision) Reset() { *m = SourceRevision{} }
+func (*SourceRevision) ProtoMessage() {}
+func (*SourceRevision) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{55}
+}
+func (m *SourceRevision) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SourceRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceRevision) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceRevision.Merge(m, src)
+}
+func (m *SourceRevision) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceRevision) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceRevision.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceRevision proto.InternalMessageInfo
+
+func (m *SourceStrategyOptions) Reset() { *m = SourceStrategyOptions{} }
+func (*SourceStrategyOptions) ProtoMessage() {}
+func (*SourceStrategyOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{56}
+}
+func (m *SourceStrategyOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SourceStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceStrategyOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceStrategyOptions.Merge(m, src)
+}
+func (m *SourceStrategyOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceStrategyOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceStrategyOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceStrategyOptions proto.InternalMessageInfo
+
+func (m *StageInfo) Reset() { *m = StageInfo{} }
+func (*StageInfo) ProtoMessage() {}
+func (*StageInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{57}
+}
+func (m *StageInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StageInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *StageInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StageInfo.Merge(m, src)
+}
+func (m *StageInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *StageInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_StageInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StageInfo proto.InternalMessageInfo
+
+func (m *StepInfo) Reset() { *m = StepInfo{} }
+func (*StepInfo) ProtoMessage() {}
+func (*StepInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{58}
+}
+func (m *StepInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StepInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *StepInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StepInfo.Merge(m, src)
+}
+func (m *StepInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *StepInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_StepInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StepInfo proto.InternalMessageInfo
+
+func (m *WebHookTrigger) Reset() { *m = WebHookTrigger{} }
+func (*WebHookTrigger) ProtoMessage() {}
+func (*WebHookTrigger) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{59}
+}
+func (m *WebHookTrigger) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WebHookTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *WebHookTrigger) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WebHookTrigger.Merge(m, src)
+}
+func (m *WebHookTrigger) XXX_Size() int {
+	return m.Size()
+}
+func (m *WebHookTrigger) XXX_DiscardUnknown() {
+	xxx_messageInfo_WebHookTrigger.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WebHookTrigger proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*BinaryBuildRequestOptions)(nil), "git.colasdn.top.openshift.api.build.v1.BinaryBuildRequestOptions")
+	proto.RegisterType((*BinaryBuildSource)(nil), "git.colasdn.top.openshift.api.build.v1.BinaryBuildSource")
+	proto.RegisterType((*BitbucketWebHookCause)(nil), "git.colasdn.top.openshift.api.build.v1.BitbucketWebHookCause")
+	proto.RegisterType((*Build)(nil), "git.colasdn.top.openshift.api.build.v1.Build")
+	proto.RegisterType((*BuildCondition)(nil), "git.colasdn.top.openshift.api.build.v1.BuildCondition")
+	proto.RegisterType((*BuildConfig)(nil), "git.colasdn.top.openshift.api.build.v1.BuildConfig")
+	proto.RegisterType((*BuildConfigList)(nil), "git.colasdn.top.openshift.api.build.v1.BuildConfigList")
+	proto.RegisterType((*BuildConfigSpec)(nil), "git.colasdn.top.openshift.api.build.v1.BuildConfigSpec")
+	proto.RegisterType((*BuildConfigStatus)(nil), "git.colasdn.top.openshift.api.build.v1.BuildConfigStatus")
+	proto.RegisterType((*BuildList)(nil), "git.colasdn.top.openshift.api.build.v1.BuildList")
+	proto.RegisterType((*BuildLog)(nil), "git.colasdn.top.openshift.api.build.v1.BuildLog")
+	proto.RegisterType((*BuildLogOptions)(nil), "git.colasdn.top.openshift.api.build.v1.BuildLogOptions")
+	proto.RegisterType((*BuildOutput)(nil), "git.colasdn.top.openshift.api.build.v1.BuildOutput")
+	proto.RegisterType((*BuildPostCommitSpec)(nil), "git.colasdn.top.openshift.api.build.v1.BuildPostCommitSpec")
+	proto.RegisterType((*BuildRequest)(nil), "git.colasdn.top.openshift.api.build.v1.BuildRequest")
+	proto.RegisterType((*BuildSource)(nil), "git.colasdn.top.openshift.api.build.v1.BuildSource")
+	proto.RegisterType((*BuildSpec)(nil), "git.colasdn.top.openshift.api.build.v1.BuildSpec")
+	proto.RegisterType((*BuildStatus)(nil), "git.colasdn.top.openshift.api.build.v1.BuildStatus")
+	proto.RegisterType((*BuildStatusOutput)(nil), "git.colasdn.top.openshift.api.build.v1.BuildStatusOutput")
+	proto.RegisterType((*BuildStatusOutputTo)(nil), "git.colasdn.top.openshift.api.build.v1.BuildStatusOutputTo")
+	proto.RegisterType((*BuildStrategy)(nil), "git.colasdn.top.openshift.api.build.v1.BuildStrategy")
+	proto.RegisterType((*BuildTriggerCause)(nil), "git.colasdn.top.openshift.api.build.v1.BuildTriggerCause")
+	proto.RegisterType((*BuildTriggerPolicy)(nil), "git.colasdn.top.openshift.api.build.v1.BuildTriggerPolicy")
+	proto.RegisterType((*BuildVolume)(nil), "git.colasdn.top.openshift.api.build.v1.BuildVolume")
+	proto.RegisterType((*BuildVolumeMount)(nil), "git.colasdn.top.openshift.api.build.v1.BuildVolumeMount")
+	proto.RegisterType((*BuildVolumeSource)(nil), "git.colasdn.top.openshift.api.build.v1.BuildVolumeSource")
+	proto.RegisterType((*CommonSpec)(nil), "git.colasdn.top.openshift.api.build.v1.CommonSpec")
+	proto.RegisterType((*CommonWebHookCause)(nil), "git.colasdn.top.openshift.api.build.v1.CommonWebHookCause")
+	proto.RegisterType((*ConfigMapBuildSource)(nil), "git.colasdn.top.openshift.api.build.v1.ConfigMapBuildSource")
+	proto.RegisterType((*CustomBuildStrategy)(nil), "git.colasdn.top.openshift.api.build.v1.CustomBuildStrategy")
+	proto.RegisterType((*DockerBuildStrategy)(nil), "git.colasdn.top.openshift.api.build.v1.DockerBuildStrategy")
+	proto.RegisterType((*DockerStrategyOptions)(nil), "git.colasdn.top.openshift.api.build.v1.DockerStrategyOptions")
+	proto.RegisterType((*GenericWebHookCause)(nil), "git.colasdn.top.openshift.api.build.v1.GenericWebHookCause")
+	proto.RegisterType((*GenericWebHookEvent)(nil), "git.colasdn.top.openshift.api.build.v1.GenericWebHookEvent")
+	proto.RegisterType((*GitBuildSource)(nil), "git.colasdn.top.openshift.api.build.v1.GitBuildSource")
+	proto.RegisterType((*GitHubWebHookCause)(nil), "git.colasdn.top.openshift.api.build.v1.GitHubWebHookCause")
+	proto.RegisterType((*GitInfo)(nil), "git.colasdn.top.openshift.api.build.v1.GitInfo")
+	proto.RegisterType((*GitLabWebHookCause)(nil), "git.colasdn.top.openshift.api.build.v1.GitLabWebHookCause")
+	proto.RegisterType((*GitRefInfo)(nil), "git.colasdn.top.openshift.api.build.v1.GitRefInfo")
+	proto.RegisterType((*GitSourceRevision)(nil), "git.colasdn.top.openshift.api.build.v1.GitSourceRevision")
+	proto.RegisterType((*ImageChangeCause)(nil), "git.colasdn.top.openshift.api.build.v1.ImageChangeCause")
+	proto.RegisterType((*ImageChangeTrigger)(nil), "git.colasdn.top.openshift.api.build.v1.ImageChangeTrigger")
+	proto.RegisterType((*ImageChangeTriggerStatus)(nil), "git.colasdn.top.openshift.api.build.v1.ImageChangeTriggerStatus")
+	proto.RegisterType((*ImageLabel)(nil), "git.colasdn.top.openshift.api.build.v1.ImageLabel")
+	proto.RegisterType((*ImageSource)(nil), "git.colasdn.top.openshift.api.build.v1.ImageSource")
+	proto.RegisterType((*ImageSourcePath)(nil), "git.colasdn.top.openshift.api.build.v1.ImageSourcePath")
+	proto.RegisterType((*ImageStreamTagReference)(nil), "git.colasdn.top.openshift.api.build.v1.ImageStreamTagReference")
+	proto.RegisterType((*JenkinsPipelineBuildStrategy)(nil), "git.colasdn.top.openshift.api.build.v1.JenkinsPipelineBuildStrategy")
+	proto.RegisterType((*OptionalNodeSelector)(nil), "git.colasdn.top.openshift.api.build.v1.OptionalNodeSelector")
proto.RegisterMapType((map[string]string)(nil), "git.colasdn.top.openshift.api.build.v1.OptionalNodeSelector.ItemsEntry") + proto.RegisterType((*ProxyConfig)(nil), "git.colasdn.top.openshift.api.build.v1.ProxyConfig") + proto.RegisterType((*SecretBuildSource)(nil), "git.colasdn.top.openshift.api.build.v1.SecretBuildSource") + proto.RegisterType((*SecretLocalReference)(nil), "git.colasdn.top.openshift.api.build.v1.SecretLocalReference") + proto.RegisterType((*SecretSpec)(nil), "git.colasdn.top.openshift.api.build.v1.SecretSpec") + proto.RegisterType((*SourceBuildStrategy)(nil), "git.colasdn.top.openshift.api.build.v1.SourceBuildStrategy") + proto.RegisterType((*SourceControlUser)(nil), "git.colasdn.top.openshift.api.build.v1.SourceControlUser") + proto.RegisterType((*SourceRevision)(nil), "git.colasdn.top.openshift.api.build.v1.SourceRevision") + proto.RegisterType((*SourceStrategyOptions)(nil), "git.colasdn.top.openshift.api.build.v1.SourceStrategyOptions") + proto.RegisterType((*StageInfo)(nil), "git.colasdn.top.openshift.api.build.v1.StageInfo") + proto.RegisterType((*StepInfo)(nil), "git.colasdn.top.openshift.api.build.v1.StepInfo") + proto.RegisterType((*WebHookTrigger)(nil), "git.colasdn.top.openshift.api.build.v1.WebHookTrigger") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/build/v1/generated.proto", fileDescriptor_2ba579f6f004cb75) +} + +var fileDescriptor_2ba579f6f004cb75 = []byte{ + // 4386 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5c, 0x4d, 0x6c, 0x1c, 0x47, + 0x76, 0x56, 0xcf, 0x0f, 0x67, 0xe6, 0x0d, 0x45, 0x52, 0x45, 0xc9, 0x1a, 0x69, 0xb5, 0x1c, 0xb9, + 0x1d, 0x1b, 0x76, 0x6c, 0x0f, 0x57, 0xb2, 0xa4, 0xc8, 0x36, 0xe2, 0x80, 0x43, 0x52, 0x32, 0xb5, + 0x23, 0x89, 0xa8, 0xa1, 0x65, 0xef, 0x5a, 0xd8, 0xa4, 0xd9, 0x53, 0x33, 0x6c, 0x73, 0xa6, 0x7b, + 0xdc, 0xd5, 0x43, 0x9b, 0x0b, 0x04, 0x58, 0x04, 0x58, 0x24, 0xeb, 0xbd, 0x64, 0x2f, 0x8b, 0x24, + 0x97, 0x24, 0x58, 0xe4, 0x94, 0x53, 0x02, 0x04, 0xd8, 0x60, 0x2f, 0x01, 0xb2, 0x07, 0x1f, 0x12, + 0x60, 0x83, 0x04, 0x88, 0x81, 0x5d, 0x0c, 0x62, 0xe6, 0x10, 0x20, 0x87, 0x00, 0xb9, 0xea, 0x10, + 0x04, 0xf5, 0xd3, 0xdd, 0x55, 0x3d, 0x3d, 0x54, 0x0f, 0x25, 0x3b, 0x9b, 0xe4, 0xc6, 0xa9, 0xf7, + 0xde, 0xf7, 0xea, 0xe7, 0xd5, 0xab, 0xf7, 0x5e, 0x55, 0x13, 0xae, 0xf4, 0x9c, 0x60, 0x6f, 0xb4, + 0xdb, 0xb0, 0xbd, 0xc1, 0xaa, 0x37, 0x24, 0x2e, 0xdd, 0x73, 0xba, 0xc1, 0xaa, 0x35, 0x74, 0x56, + 0x77, 0x47, 0x4e, 0xbf, 0xb3, 0x7a, 0x70, 0x65, 0xb5, 0x47, 0x5c, 0xe2, 0x5b, 0x01, 0xe9, 0x34, + 0x86, 0xbe, 0x17, 0x78, 0xe8, 0xd9, 0x58, 0xa4, 0x11, 0x89, 0x34, 0xac, 0xa1, 0xd3, 0xe0, 0x22, + 0x8d, 0x83, 0x2b, 0x17, 0x5f, 0x55, 0x50, 0x7b, 0x5e, 0xcf, 0x5b, 0xe5, 0x92, 0xbb, 0xa3, 0x2e, + 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x04, 0xe2, 0x45, 0x73, 0xff, 0x26, 0x6d, 0x38, 0x1e, 0x57, 0x6b, + 0x7b, 0x3e, 0x49, 0xd1, 0x7a, 0xf1, 0x5a, 0xcc, 0x33, 0xb0, 0xec, 0x3d, 0xc7, 0x25, 0xfe, 0xe1, + 0xea, 0x70, 0xbf, 0xc7, 0x1a, 0xe8, 0xea, 0x80, 0x04, 0x56, 0x9a, 0xd4, 0x8d, 0x69, 0x52, 0xfe, + 0xc8, 0x0d, 0x9c, 0x01, 0x59, 0xa5, 0xf6, 0x1e, 0x19, 0x58, 0x49, 0x39, 0xf3, 0x6f, 0x0a, 0x70, + 0xa1, 0xe9, 0xb8, 0x96, 0x7f, 0xd8, 0x64, 0x63, 0xc2, 0xe4, 0xc3, 0x11, 0xa1, 0xc1, 0xfd, 0x61, + 0xe0, 0x78, 0x2e, 0x45, 0xbf, 0x05, 0x65, 0xa6, 0xb0, 0x63, 0x05, 0x56, 0xcd, 0xb8, 0x6c, 0xbc, + 0x58, 0xbd, 0xfa, 0xb5, 0x86, 0x50, 0xd4, 0x50, 0x15, 0x35, 0x86, 0xfb, 0x3d, 0xd6, 0x40, 0x1b, + 0x8c, 0xbb, 0x71, 0x70, 0xa5, 0x71, 0x7f, 0xf7, 0x03, 0x62, 0x07, 0x77, 0x49, 0x60, 0x35, 0xd1, + 0xa7, 
0xe3, 0xfa, 0xa9, 0xa3, 0x71, 0x1d, 0xe2, 0x36, 0x1c, 0xa1, 0xa2, 0x17, 0x60, 0xce, 0xa2, + 0xb7, 0x9c, 0x3e, 0xa9, 0xe5, 0x2e, 0x1b, 0x2f, 0x56, 0x9a, 0x0b, 0x92, 0x7b, 0x6e, 0x8d, 0xb7, + 0x62, 0x49, 0x45, 0x37, 0x60, 0xc1, 0x27, 0x07, 0x0e, 0x75, 0x3c, 0x77, 0xdd, 0x1b, 0x0c, 0x9c, + 0xa0, 0x96, 0xd7, 0xf9, 0x45, 0x2b, 0x4e, 0x70, 0xa1, 0xd7, 0x61, 0x31, 0x6c, 0xb9, 0x4b, 0x28, + 0xb5, 0x7a, 0xa4, 0x56, 0xe0, 0x82, 0x8b, 0x52, 0xb0, 0x24, 0x9b, 0x71, 0x92, 0x0f, 0x35, 0x01, + 0x85, 0x4d, 0x6b, 0xa3, 0x60, 0xcf, 0xf3, 0xef, 0x59, 0x03, 0x52, 0x2b, 0x72, 0xe9, 0x68, 0x50, + 0x31, 0x05, 0xa7, 0x70, 0xa3, 0x4d, 0x58, 0xd6, 0x5b, 0x37, 0x07, 0x96, 0xd3, 0xaf, 0xcd, 0x71, + 0x90, 0x65, 0x09, 0x52, 0x55, 0x48, 0x38, 0x8d, 0x1f, 0x7d, 0x1d, 0xce, 0xe9, 0xe3, 0x0a, 0x88, + 0xe8, 0x4d, 0x89, 0x03, 0x9d, 0x93, 0x40, 0xa7, 0x35, 0x22, 0x4e, 0x97, 0x41, 0xf7, 0xe0, 0x99, + 0x09, 0x82, 0xe8, 0x56, 0x99, 0xa3, 0x3d, 0x23, 0xd1, 0x16, 0x74, 0x2a, 0x9e, 0x22, 0x65, 0xbe, + 0x09, 0x67, 0x14, 0x0b, 0x6a, 0x7b, 0x23, 0xdf, 0x26, 0xca, 0xba, 0x1a, 0xc7, 0xad, 0xab, 0xf9, + 0x89, 0x01, 0xe7, 0x9a, 0x4e, 0xb0, 0x3b, 0xb2, 0xf7, 0x49, 0xf0, 0x2e, 0xd9, 0x7d, 0xdb, 0xf3, + 0xf6, 0xd7, 0xad, 0x11, 0x25, 0xe8, 0x43, 0x00, 0xdb, 0x1b, 0x0c, 0x3c, 0xb7, 0x3d, 0x24, 0xb6, + 0xb4, 0xbe, 0xeb, 0x8d, 0xc7, 0x6e, 0xc9, 0xc6, 0x3a, 0x17, 0x52, 0xa1, 0x9a, 0x17, 0xa5, 0x72, + 0x34, 0x49, 0xc3, 0x8a, 0x12, 0xf3, 0x07, 0x39, 0x28, 0xf2, 0x41, 0x7c, 0x09, 0x86, 0x7f, 0x0f, + 0x0a, 0x94, 0x0d, 0x2c, 0xc7, 0xd1, 0x5f, 0xc9, 0x30, 0x30, 0x31, 0xbd, 0x43, 0x62, 0x37, 0xe7, + 0x25, 0x72, 0x81, 0xfd, 0xc2, 0x1c, 0x07, 0x3d, 0x80, 0x39, 0x1a, 0x58, 0xc1, 0x88, 0xf2, 0x8d, + 0x51, 0xbd, 0xda, 0xc8, 0x8c, 0xc8, 0xa5, 0xe2, 0x05, 0x12, 0xbf, 0xb1, 0x44, 0x33, 0xff, 0x3e, + 0x0f, 0x0b, 0x9c, 0x6f, 0xdd, 0x73, 0x3b, 0x0e, 0x73, 0x0b, 0xe8, 0x06, 0x14, 0x82, 0xc3, 0x61, + 0xb8, 0xb2, 0x66, 0xd8, 0x99, 0x9d, 0xc3, 0x21, 0x79, 0x34, 0xae, 0x23, 0x9d, 0x9b, 0xb5, 0x62, + 0xce, 0x8f, 0x5a, 0x51, 0x17, 0xc5, 0x5e, 0xbf, 0xa6, 0xab, 0x7c, 0x34, 0xae, 0xa7, 0xf8, 0xc7, + 0x46, 0x84, 0xa4, 0x77, 0x0c, 0x7d, 0x00, 0x0b, 0x7d, 0x8b, 0x06, 0xef, 0x0c, 0x3b, 0x56, 0x40, + 0x76, 0x9c, 0x01, 0xe1, 0xbb, 0xaa, 0x7a, 0xf5, 0x57, 0xb3, 0x2d, 0x14, 0x93, 0x88, 0x4d, 0xbd, + 0xa5, 0x21, 0xe1, 0x04, 0x32, 0x3a, 0x00, 0xc4, 0x5a, 0x76, 0x7c, 0xcb, 0xa5, 0x62, 0x54, 0x4c, + 0x5f, 0x7e, 0x66, 0x7d, 0x91, 0x21, 0xb6, 0x26, 0xd0, 0x70, 0x8a, 0x06, 0xb6, 0x8b, 0x7c, 0x62, + 0x51, 0xcf, 0x95, 0x4e, 0x2b, 0x5a, 0x24, 0xcc, 0x5b, 0xb1, 0xa4, 0xa2, 0x97, 0xa0, 0x34, 0x90, + 0xde, 0xad, 0x98, 0xee, 0xdd, 0x42, 0xba, 0xf9, 0xa3, 0x1c, 0x54, 0xc3, 0x15, 0xea, 0x3a, 0xbd, + 0x2f, 0xc1, 0xd2, 0x77, 0x34, 0x4b, 0xbf, 0x9a, 0xd5, 0x2e, 0x45, 0xff, 0xa6, 0xda, 0xfb, 0xc3, + 0x84, 0xbd, 0x5f, 0x9b, 0x11, 0xf7, 0x78, 0xab, 0xff, 0xa9, 0x01, 0x8b, 0x0a, 0x77, 0xcb, 0xa1, + 0x01, 0x7a, 0x38, 0x31, 0x53, 0x8d, 0x6c, 0x33, 0xc5, 0xa4, 0xf9, 0x3c, 0x2d, 0x49, 0x6d, 0xe5, + 0xb0, 0x45, 0x99, 0xa5, 0x36, 0x14, 0x9d, 0x80, 0x0c, 0xd8, 0xde, 0xc8, 0xcf, 0xb2, 0x7d, 0x45, + 0x07, 0x9b, 0xa7, 0x25, 0x74, 0x71, 0x8b, 0x81, 0x60, 0x81, 0x65, 0xfe, 0x22, 0xaf, 0x0d, 0x83, + 0x4d, 0x1f, 0xb2, 0xa1, 0x1c, 0xf8, 0x4e, 0xaf, 0x47, 0x7c, 0x5a, 0x33, 0xb8, 0xae, 0xeb, 0x59, + 0x75, 0xed, 0x08, 0xb9, 0x6d, 0xaf, 0xef, 0xd8, 0x87, 0xf1, 0x68, 0x64, 0x33, 0xc5, 0x11, 0x30, + 0x5a, 0x83, 0x8a, 0x3f, 0x72, 0x05, 0xa3, 0xdc, 0xed, 0xcf, 0x49, 0xf6, 0x0a, 0x0e, 0x09, 0x8f, + 0xc6, 0x75, 0xe1, 0x5a, 0xa2, 0x16, 0x1c, 0x4b, 0x21, 0x4b, 0xf3, 0xff, 0x62, 0x91, 0x5f, 0xcd, + 0xec, 0xff, 0xb9, 0xdd, 0x44, 
0x76, 0x19, 0xb7, 0xa9, 0xfe, 0x1e, 0x75, 0xe0, 0x12, 0x1d, 0xd9, + 0x36, 0xa1, 0xb4, 0x3b, 0xea, 0xf3, 0x9e, 0xd0, 0xb7, 0x1d, 0x1a, 0x78, 0xfe, 0x61, 0xcb, 0x61, + 0x21, 0x06, 0xdb, 0x74, 0xc5, 0xe6, 0xe5, 0xa3, 0x71, 0xfd, 0x52, 0xfb, 0x18, 0x3e, 0x7c, 0x2c, + 0x0a, 0x7a, 0x0f, 0x6a, 0x5d, 0xcb, 0xe9, 0x93, 0x4e, 0x8a, 0x86, 0x22, 0xd7, 0x70, 0xe9, 0x68, + 0x5c, 0xaf, 0xdd, 0x9a, 0xc2, 0x83, 0xa7, 0x4a, 0x9b, 0xff, 0x6c, 0xc0, 0x99, 0x09, 0x9b, 0x46, + 0xd7, 0xa1, 0xca, 0x5c, 0xc9, 0x03, 0xe2, 0xb3, 0xc3, 0x9a, 0x9b, 0x6a, 0x3e, 0x8e, 0x35, 0x5a, + 0x31, 0x09, 0xab, 0x7c, 0xe8, 0x13, 0x03, 0x96, 0x9d, 0x81, 0xd5, 0x23, 0xeb, 0x7b, 0x96, 0xdb, + 0x23, 0xe1, 0xa2, 0x4a, 0x7b, 0x7c, 0x33, 0xc3, 0xcc, 0x6f, 0x4d, 0x48, 0xcb, 0x5d, 0xf6, 0x15, + 0xa9, 0x7c, 0x79, 0x92, 0x83, 0xe2, 0x34, 0xa5, 0xe6, 0x8f, 0x0d, 0xa8, 0xf0, 0x91, 0x7d, 0x09, + 0x3b, 0xef, 0xae, 0xbe, 0xf3, 0x5e, 0xcc, 0xba, 0x1b, 0xa6, 0xec, 0x39, 0x80, 0xb2, 0xe8, 0xb9, + 0xd7, 0x33, 0xff, 0xb3, 0x20, 0xf7, 0x5f, 0xcb, 0xeb, 0x85, 0x31, 0xf5, 0x2a, 0x54, 0x6c, 0xcf, + 0x0d, 0x2c, 0xd6, 0x65, 0x79, 0x84, 0x9e, 0x09, 0xb7, 0xc6, 0x7a, 0x48, 0xc0, 0x31, 0x0f, 0x3b, + 0x04, 0xba, 0x5e, 0xbf, 0xef, 0x7d, 0xc4, 0x37, 0x52, 0x39, 0xf6, 0x59, 0xb7, 0x78, 0x2b, 0x96, + 0x54, 0xf4, 0x0a, 0x94, 0x87, 0x2c, 0x44, 0xf3, 0xa4, 0x4f, 0x2c, 0xc7, 0xa3, 0xde, 0x96, 0xed, + 0x38, 0xe2, 0x40, 0xd7, 0x60, 0x9e, 0x3a, 0xae, 0x4d, 0xda, 0xc4, 0xf6, 0xdc, 0x0e, 0xe5, 0xb6, + 0x9e, 0x6f, 0x2e, 0x1d, 0x8d, 0xeb, 0xf3, 0x6d, 0xa5, 0x1d, 0x6b, 0x5c, 0xe8, 0x5d, 0xa8, 0xf0, + 0xdf, 0xfc, 0xfc, 0x2b, 0xce, 0x7c, 0xfe, 0x9d, 0x66, 0x83, 0x6c, 0x87, 0x00, 0x38, 0xc6, 0x42, + 0x57, 0x01, 0x58, 0x9a, 0x42, 0x03, 0x6b, 0x30, 0xa4, 0xfc, 0x24, 0x2f, 0xc7, 0xdb, 0x77, 0x27, + 0xa2, 0x60, 0x85, 0x0b, 0xbd, 0x0c, 0x95, 0xc0, 0x72, 0xfa, 0x2d, 0xc7, 0x25, 0x94, 0x47, 0xc2, + 0x79, 0xa1, 0x60, 0x27, 0x6c, 0xc4, 0x31, 0x1d, 0x35, 0x00, 0xfa, 0x6c, 0xd3, 0x34, 0x0f, 0x03, + 0x42, 0x79, 0xa4, 0x9b, 0x6f, 0x2e, 0x30, 0xf0, 0x56, 0xd4, 0x8a, 0x15, 0x0e, 0x36, 0xeb, 0xae, + 0xf7, 0x91, 0xe5, 0x04, 0xb5, 0x8a, 0x3e, 0xeb, 0xf7, 0xbc, 0x77, 0x2d, 0x27, 0xc0, 0x92, 0x8a, + 0x9e, 0x87, 0xd2, 0x81, 0xdc, 0x69, 0xc0, 0x41, 0xab, 0xec, 0xd8, 0x0d, 0x77, 0x58, 0x48, 0x43, + 0x7b, 0x70, 0xc9, 0x71, 0x29, 0xb1, 0x47, 0x3e, 0x69, 0xef, 0x3b, 0xc3, 0x9d, 0x56, 0xfb, 0x01, + 0xf1, 0x9d, 0xee, 0x61, 0xd3, 0xb2, 0xf7, 0x89, 0xdb, 0xa9, 0x55, 0xb9, 0x92, 0x5f, 0x91, 0x4a, + 0x2e, 0x6d, 0x1d, 0xc3, 0x8b, 0x8f, 0x45, 0x32, 0x3f, 0x09, 0x0f, 0xf8, 0xfb, 0xa3, 0x60, 0x38, + 0x0a, 0xd0, 0x9b, 0x90, 0x0b, 0x3c, 0xb9, 0x6d, 0x9e, 0x53, 0xd6, 0xaa, 0xc1, 0x02, 0xac, 0xf8, + 0x20, 0xc7, 0xa4, 0x4b, 0x7c, 0xe2, 0xda, 0xa4, 0x39, 0x77, 0x34, 0xae, 0xe7, 0x76, 0x3c, 0x9c, + 0x0b, 0x3c, 0xf4, 0x1e, 0xc0, 0x70, 0x44, 0xf7, 0xda, 0xc4, 0xf6, 0x49, 0x20, 0x4f, 0xf0, 0x17, + 0xd3, 0x40, 0x5a, 0x9e, 0x6d, 0xf5, 0x93, 0x48, 0x7c, 0x7e, 0xb7, 0x23, 0x79, 0xac, 0x60, 0xa1, + 0x0e, 0x54, 0xf9, 0xc6, 0x6f, 0x59, 0xbb, 0xa4, 0xcf, 0x0c, 0x36, 0x9f, 0xd1, 0xbf, 0x6f, 0x45, + 0x52, 0xb1, 0x53, 0x8b, 0xdb, 0x28, 0x56, 0x61, 0xcd, 0xdf, 0x31, 0x60, 0x99, 0x4f, 0xc6, 0xb6, + 0x47, 0x03, 0x91, 0xb7, 0x70, 0xcf, 0xff, 0x3c, 0x94, 0xd8, 0x39, 0x60, 0xb9, 0x1d, 0x7e, 0x06, + 0x56, 0xc4, 0xaa, 0xad, 0x8b, 0x26, 0x1c, 0xd2, 0xd0, 0x25, 0x28, 0x58, 0x7e, 0x4f, 0x78, 0x86, + 0x4a, 0xb3, 0xcc, 0x42, 0x90, 0x35, 0xbf, 0x47, 0x31, 0x6f, 0x65, 0x26, 0x42, 0x6d, 0xdf, 0x19, + 0x4e, 0xe4, 0xa2, 0x6d, 0xde, 0x8a, 0x25, 0xd5, 0xfc, 0x69, 0x09, 0xe6, 0xd5, 0xec, 0xfa, 0x4b, + 0x88, 0xb9, 0xde, 0x87, 0x72, 0x98, 0xad, 0xc9, 0x55, 
0xbb, 0x92, 0x61, 0x6a, 0x45, 0xee, 0x86, + 0xa5, 0x60, 0x73, 0x9e, 0xb9, 0x8e, 0xf0, 0x17, 0x8e, 0x00, 0x11, 0x81, 0x25, 0x79, 0xd0, 0x93, + 0x4e, 0xf3, 0x90, 0xcf, 0xbd, 0x3c, 0x9f, 0x33, 0xd9, 0xd7, 0xd9, 0xa3, 0x71, 0x7d, 0x69, 0x27, + 0x01, 0x80, 0x27, 0x20, 0xd1, 0x1a, 0x14, 0xba, 0xbe, 0x37, 0xe0, 0x9e, 0x29, 0x23, 0x34, 0x5f, + 0xa1, 0x5b, 0xbe, 0x37, 0xc0, 0x5c, 0x14, 0xbd, 0x07, 0x73, 0xbb, 0x3c, 0x35, 0x95, 0xbe, 0x2a, + 0x53, 0x90, 0x98, 0xcc, 0x65, 0x9b, 0xc0, 0xd6, 0x54, 0x34, 0x63, 0x89, 0x87, 0xae, 0xe8, 0x87, + 0xec, 0x1c, 0xdf, 0xfa, 0x8b, 0xc7, 0x1e, 0xb0, 0xaf, 0x43, 0x9e, 0xb8, 0x07, 0xb5, 0x12, 0xb7, + 0xf4, 0x8b, 0x69, 0xc3, 0xd9, 0x74, 0x0f, 0x1e, 0x58, 0x7e, 0xb3, 0x2a, 0x97, 0x36, 0xbf, 0xe9, + 0x1e, 0x60, 0x26, 0x83, 0xf6, 0xa1, 0xaa, 0x4c, 0x4f, 0xad, 0xcc, 0x21, 0xae, 0xcd, 0x18, 0xb6, + 0x89, 0x5c, 0x38, 0xda, 0x33, 0xca, 0x0a, 0x60, 0x15, 0x1d, 0x7d, 0xcf, 0x80, 0x73, 0x1d, 0xcf, + 0xde, 0x67, 0xc7, 0xb7, 0x6f, 0x05, 0xa4, 0x77, 0x28, 0x8f, 0x2e, 0xee, 0x09, 0xab, 0x57, 0x6f, + 0x66, 0xd0, 0xbb, 0x91, 0x26, 0xdf, 0xbc, 0x70, 0x34, 0xae, 0x9f, 0x4b, 0x25, 0xe1, 0x74, 0x8d, + 0xbc, 0x2f, 0x94, 0xaf, 0x42, 0xb2, 0x2f, 0x90, 0xb9, 0x2f, 0xed, 0x34, 0x79, 0xd1, 0x97, 0x54, + 0x12, 0x4e, 0xd7, 0x68, 0xfe, 0x53, 0x51, 0x3a, 0x56, 0x59, 0xe2, 0x78, 0x4d, 0x4b, 0x83, 0xeb, + 0x89, 0x34, 0x78, 0x51, 0x61, 0x55, 0x72, 0xe0, 0xd8, 0x22, 0x73, 0x4f, 0xd9, 0x22, 0x1b, 0x00, + 0x62, 0x0e, 0xbb, 0x4e, 0x9f, 0x84, 0x1e, 0x89, 0x39, 0x88, 0x8d, 0xa8, 0x15, 0x2b, 0x1c, 0xa8, + 0x05, 0xf9, 0x9e, 0x8c, 0x71, 0xb3, 0x79, 0x87, 0xdb, 0x4e, 0xa0, 0xf6, 0xa1, 0xc4, 0x2c, 0xf4, + 0xb6, 0x13, 0x60, 0x06, 0x83, 0x1e, 0xc0, 0x1c, 0xf7, 0xbb, 0xb4, 0x56, 0xcc, 0x9c, 0xbf, 0xf0, + 0x6d, 0x2e, 0xd1, 0x22, 0xdf, 0xc9, 0x1b, 0x29, 0x96, 0x68, 0x2c, 0x2e, 0x60, 0x91, 0x10, 0xf9, + 0x38, 0xd8, 0x70, 0x7c, 0x59, 0x37, 0x53, 0xc2, 0xfa, 0x90, 0x82, 0x15, 0x2e, 0xf4, 0x2d, 0x98, + 0x97, 0x2b, 0x28, 0x8e, 0xad, 0xd2, 0x8c, 0xc7, 0x96, 0x08, 0x82, 0x14, 0x04, 0xac, 0xe1, 0xa1, + 0xdf, 0x84, 0x12, 0xe5, 0x7f, 0xd1, 0x19, 0x76, 0xa2, 0x90, 0x55, 0x27, 0x30, 0xca, 0xd1, 0x05, + 0x89, 0xe2, 0x10, 0x15, 0xed, 0xf3, 0x41, 0x77, 0x9d, 0xde, 0x5d, 0x6b, 0xc8, 0x76, 0x1d, 0xd3, + 0xf1, 0x6b, 0x99, 0x52, 0x1f, 0x29, 0xa4, 0xaa, 0x51, 0x67, 0x4b, 0x42, 0x62, 0x05, 0xde, 0xfc, + 0x79, 0x18, 0x6a, 0xf3, 0x83, 0xd1, 0x4a, 0xa9, 0xba, 0x3d, 0xe5, 0xac, 0x2b, 0xe1, 0xcc, 0x72, + 0x5f, 0xa4, 0x33, 0x33, 0xff, 0xa3, 0x14, 0x6e, 0x5a, 0x91, 0x1c, 0x5d, 0x81, 0xe2, 0x70, 0xcf, + 0xa2, 0xe1, 0xae, 0x0d, 0x33, 0x93, 0xe2, 0x36, 0x6b, 0x7c, 0x34, 0xae, 0x83, 0x88, 0x16, 0xd8, + 0x2f, 0x2c, 0x38, 0x79, 0xc0, 0x6e, 0xb9, 0x36, 0xe9, 0xf7, 0x49, 0x47, 0x86, 0xe0, 0x71, 0xc0, + 0x1e, 0x12, 0x70, 0xcc, 0x83, 0x6e, 0x44, 0x55, 0x1b, 0xb1, 0x0b, 0x57, 0xf4, 0xaa, 0xcd, 0x23, + 0x66, 0x5d, 0xa2, 0xdc, 0x30, 0xb5, 0x8a, 0x53, 0x38, 0xbe, 0x8a, 0x83, 0xba, 0xb0, 0x40, 0x03, + 0xcb, 0x0f, 0xa2, 0xc8, 0xf8, 0x04, 0xc1, 0x38, 0x3a, 0x1a, 0xd7, 0x17, 0xda, 0x1a, 0x0a, 0x4e, + 0xa0, 0xa2, 0x11, 0x2c, 0xdb, 0xde, 0x60, 0xd8, 0x27, 0x61, 0x49, 0x4a, 0x28, 0x9b, 0xbd, 0xd2, + 0x76, 0x9e, 0xa5, 0x7f, 0xeb, 0x93, 0x50, 0x38, 0x0d, 0x1f, 0xfd, 0x3a, 0x94, 0x3b, 0x23, 0xdf, + 0x62, 0x8d, 0x32, 0xb0, 0x7f, 0x36, 0x4c, 0x65, 0x36, 0x64, 0xfb, 0xa3, 0x71, 0xfd, 0x34, 0xcb, + 0x05, 0x1a, 0x61, 0x03, 0x8e, 0x44, 0xd0, 0x2e, 0x5c, 0xf4, 0x78, 0xf0, 0x2b, 0x5c, 0x9f, 0x08, + 0x30, 0xc2, 0xed, 0x2d, 0xab, 0xdc, 0x61, 0xd9, 0xf2, 0xe2, 0xfd, 0xa9, 0x9c, 0xf8, 0x18, 0x14, + 0x74, 0x1b, 0xe6, 0xc4, 0x26, 0x92, 0xa7, 0x62, 0xa6, 0xf8, 0x04, 0xc4, 0x4d, 
0x05, 0x13, 0xc3, + 0x52, 0x1c, 0x3d, 0x84, 0x39, 0xa1, 0x46, 0x1e, 0x69, 0xd7, 0x66, 0x2b, 0xdc, 0x8a, 0xee, 0xc7, + 0xfe, 0x53, 0xfc, 0xc6, 0x12, 0x13, 0xed, 0xf0, 0x32, 0x19, 0xf3, 0xcb, 0x55, 0xbe, 0xcf, 0xb2, + 0x14, 0x9a, 0xdb, 0x4c, 0x60, 0xcb, 0xed, 0x7a, 0x5a, 0x79, 0x8c, 0x7b, 0x65, 0x81, 0xc5, 0xbc, + 0x72, 0xdf, 0xeb, 0xb5, 0x5d, 0x67, 0x38, 0x24, 0x41, 0x6d, 0x5e, 0xf7, 0xca, 0xad, 0x88, 0x82, + 0x15, 0x2e, 0x44, 0xb8, 0x53, 0x13, 0xa5, 0x5c, 0x5a, 0x3b, 0xcd, 0x7b, 0x73, 0x65, 0x86, 0x2a, + 0x97, 0x90, 0xd4, 0xdc, 0x99, 0x04, 0xc3, 0x0a, 0xb0, 0x69, 0xcb, 0x92, 0x88, 0x3a, 0x3b, 0xe8, + 0x9e, 0x92, 0x03, 0xdd, 0x38, 0xc9, 0xfc, 0xee, 0x78, 0x6a, 0x5a, 0x64, 0xb6, 0x64, 0x56, 0xa1, + 0xb3, 0xa0, 0xeb, 0x32, 0xa7, 0xd9, 0x70, 0x7a, 0x84, 0x06, 0xd2, 0xc5, 0xe8, 0x49, 0x8a, 0x20, + 0x61, 0x95, 0xcf, 0xfc, 0x49, 0x01, 0x4e, 0x4b, 0x38, 0x11, 0x71, 0xa0, 0xeb, 0x5a, 0x68, 0xf1, + 0x6c, 0x22, 0xb4, 0x38, 0xa3, 0x31, 0x2b, 0xc1, 0x85, 0x0f, 0x0b, 0x7a, 0x18, 0x25, 0x83, 0x8c, + 0x1b, 0x99, 0x23, 0x36, 0x0d, 0x59, 0x78, 0x08, 0x3d, 0x5e, 0xc3, 0x09, 0x0d, 0x4c, 0xa7, 0x1e, + 0x2e, 0xc9, 0x54, 0xe0, 0x46, 0xe6, 0xc8, 0x2c, 0x45, 0xa7, 0x1e, 0x97, 0xe1, 0x84, 0x06, 0xa6, + 0xd3, 0x1e, 0xd1, 0xc0, 0x1b, 0x44, 0x3a, 0x0b, 0x99, 0x75, 0xae, 0x73, 0xc1, 0x14, 0x9d, 0xeb, + 0x1a, 0x22, 0x4e, 0x68, 0x40, 0x3f, 0x34, 0xe0, 0xfc, 0x07, 0xc4, 0xdd, 0x77, 0x5c, 0xba, 0xed, + 0x0c, 0x49, 0xdf, 0x71, 0xe3, 0x11, 0x0b, 0xdf, 0xfb, 0x1b, 0x19, 0xb4, 0xdf, 0xd1, 0x11, 0xf4, + 0x6e, 0x7c, 0xe5, 0x68, 0x5c, 0x3f, 0x7f, 0x27, 0x5d, 0x07, 0x9e, 0xa6, 0xdc, 0xfc, 0x6e, 0x51, + 0x5a, 0xbc, 0x7a, 0x32, 0xaa, 0x67, 0x89, 0xf1, 0x98, 0xb3, 0xc4, 0x87, 0x05, 0x7e, 0x2b, 0xec, + 0xd8, 0xf2, 0x62, 0x6c, 0x06, 0xab, 0xb9, 0xad, 0x09, 0x8a, 0x43, 0x99, 0xcf, 0xa6, 0x4e, 0xc0, + 0x09, 0x0d, 0xc8, 0x85, 0xd3, 0x02, 0x3c, 0x54, 0x99, 0xcf, 0x7c, 0xbf, 0x77, 0xdb, 0x09, 0xde, + 0x8e, 0xe4, 0x84, 0xc6, 0x33, 0x47, 0xe3, 0xfa, 0x69, 0xad, 0x1d, 0xeb, 0xf0, 0x68, 0x04, 0x4b, + 0x4a, 0x99, 0x91, 0x4f, 0x97, 0xb4, 0x99, 0xd7, 0x66, 0x2b, 0x6c, 0x0a, 0x85, 0x3c, 0x85, 0xdd, + 0x4a, 0x00, 0xe2, 0x09, 0x15, 0x72, 0x98, 0x7d, 0x2b, 0x1a, 0x66, 0x71, 0x96, 0x61, 0xb6, 0xac, + 0xf4, 0x61, 0xc6, 0xed, 0x58, 0x87, 0x47, 0xdf, 0x86, 0xa5, 0xdd, 0xc4, 0x65, 0xaa, 0x3c, 0xab, + 0x6f, 0x66, 0xca, 0x33, 0x52, 0xee, 0x61, 0xc5, 0x58, 0x93, 0x24, 0x3c, 0xa1, 0xc7, 0xfc, 0x71, + 0x01, 0xd0, 0xe4, 0x2d, 0x01, 0xba, 0xa6, 0xb9, 0xb2, 0xcb, 0x09, 0x57, 0xb6, 0xa4, 0x4a, 0x28, + 0x9e, 0xec, 0x21, 0xcc, 0x89, 0xfe, 0xce, 0x50, 0xbd, 0x90, 0x1d, 0x91, 0x60, 0x69, 0x46, 0x21, + 0x31, 0x59, 0x00, 0x2f, 0xed, 0x51, 0xda, 0xdd, 0x09, 0xe0, 0xd3, 0xac, 0x3c, 0x44, 0x45, 0x7b, + 0xf2, 0x20, 0x10, 0xb6, 0x20, 0x2d, 0xed, 0xfa, 0x89, 0x4a, 0xe8, 0xa2, 0xa8, 0xa0, 0xb4, 0x63, + 0x15, 0x5a, 0x4e, 0x54, 0xdf, 0xda, 0x95, 0xa6, 0xf5, 0x04, 0x13, 0xa5, 0x98, 0x95, 0xc4, 0x44, + 0x04, 0x2a, 0xd1, 0x3a, 0x4b, 0x43, 0x3a, 0x81, 0x82, 0x74, 0x0b, 0x8a, 0x91, 0xcd, 0x7f, 0x37, + 0x64, 0x90, 0xfe, 0xc0, 0xeb, 0x8f, 0x06, 0x04, 0x5d, 0x86, 0x82, 0x6b, 0x0d, 0x42, 0x9b, 0x89, + 0x6e, 0xff, 0xf8, 0xa3, 0x06, 0x4e, 0xe1, 0xb7, 0x7f, 0xfc, 0x4c, 0x98, 0x25, 0x8d, 0x8e, 0x35, + 0x24, 0x93, 0x4e, 0x59, 0xf8, 0x92, 0x98, 0xe8, 0x7d, 0x98, 0x1b, 0x78, 0x23, 0x37, 0x08, 0xcb, + 0x92, 0xaf, 0xcd, 0x86, 0x7e, 0x97, 0xc9, 0xc6, 0xe0, 0xfc, 0x27, 0xc5, 0x12, 0xd2, 0x7c, 0x07, + 0x96, 0x92, 0xbc, 0x68, 0x0d, 0x16, 0x3b, 0x84, 0x06, 0x8e, 0xcb, 0xe3, 0xd7, 0x6d, 0x2b, 0xd8, + 0x93, 0x63, 0x3f, 0x2f, 0x41, 0x16, 0x37, 0x74, 0x32, 0x4e, 0xf2, 0x9b, 0x7f, 0x99, 0x93, 0xc7, + 0x80, 
0x3a, 0x42, 0xf4, 0xba, 0xb6, 0xfb, 0x9e, 0x4f, 0xec, 0xbe, 0x73, 0x13, 0x02, 0xca, 0x16, + 0xbc, 0x03, 0x73, 0x54, 0x2d, 0xfb, 0xbe, 0x90, 0x16, 0xe0, 0x8a, 0xd4, 0x55, 0x9b, 0x54, 0x1e, + 0xe3, 0xca, 0xbc, 0x59, 0x22, 0xa0, 0x07, 0xfc, 0xce, 0x43, 0x64, 0x9c, 0x72, 0xcb, 0xbd, 0x94, + 0x06, 0x17, 0xa5, 0xa8, 0x1a, 0xe2, 0x69, 0x79, 0x35, 0x22, 0x48, 0x38, 0x86, 0x42, 0x6f, 0x41, + 0xde, 0xa6, 0xce, 0x71, 0x15, 0xc2, 0xf5, 0xf6, 0x96, 0x86, 0xc5, 0xab, 0x16, 0xeb, 0xed, 0x2d, + 0xcc, 0x04, 0xcd, 0xdf, 0x2b, 0x81, 0x92, 0xa5, 0xa2, 0xb7, 0x60, 0x81, 0x12, 0xff, 0xc0, 0xb1, + 0xc9, 0x9a, 0x6d, 0xb3, 0x85, 0x91, 0xf3, 0x16, 0x3d, 0x13, 0x68, 0x6b, 0x54, 0x9c, 0xe0, 0xe6, + 0x6f, 0x30, 0x54, 0xab, 0xcc, 0xfe, 0x06, 0xe3, 0x71, 0xf6, 0x18, 0x57, 0x73, 0xf3, 0x4f, 0xbb, + 0x9a, 0xfb, 0x2d, 0x28, 0x53, 0x3d, 0x8c, 0xfa, 0x5a, 0xf6, 0x08, 0x59, 0x46, 0x2e, 0xd1, 0x45, + 0x53, 0x14, 0xae, 0x44, 0x98, 0x6c, 0x52, 0x64, 0x7e, 0x53, 0x9c, 0x6d, 0x52, 0x1e, 0x93, 0xd9, + 0x7c, 0x03, 0x2a, 0x3e, 0x11, 0x13, 0x44, 0xa5, 0x6f, 0x4a, 0x2d, 0xf1, 0x60, 0xc9, 0x84, 0xc9, + 0x87, 0x23, 0xc7, 0x27, 0x03, 0xe2, 0x06, 0x34, 0x4e, 0xe0, 0x43, 0x2a, 0xc5, 0x31, 0x1a, 0xfa, + 0x00, 0x60, 0x18, 0xdd, 0x17, 0xc8, 0xf2, 0x51, 0xe6, 0xb4, 0x41, 0xbf, 0x69, 0x88, 0xf3, 0x95, + 0xb8, 0x1d, 0x2b, 0xe8, 0xe8, 0x7d, 0xb8, 0x10, 0x67, 0xc0, 0x1b, 0xc4, 0xea, 0xf0, 0xe0, 0x4e, + 0x5e, 0xca, 0x89, 0x6b, 0xaa, 0xaf, 0x1e, 0x8d, 0xeb, 0x17, 0xd6, 0xa7, 0x31, 0xe1, 0xe9, 0xf2, + 0x68, 0x00, 0xf3, 0xae, 0xd7, 0x21, 0x6d, 0xd2, 0x27, 0x76, 0xe0, 0xf9, 0x32, 0x55, 0xcd, 0x52, + 0x4a, 0x12, 0x45, 0x4f, 0xab, 0x7f, 0x4f, 0x11, 0x17, 0x85, 0x31, 0xb5, 0x05, 0x6b, 0xf0, 0xe8, + 0x0d, 0x58, 0xe0, 0x4e, 0x6e, 0xc7, 0x1f, 0xd1, 0x80, 0x74, 0xd6, 0xd7, 0x78, 0x4a, 0x5b, 0x16, + 0x67, 0xe5, 0x5d, 0x8d, 0x82, 0x13, 0x9c, 0xe6, 0x1f, 0x1a, 0x90, 0xf2, 0x3c, 0x4b, 0x33, 0x7d, + 0xe3, 0x69, 0x9b, 0xfe, 0x0b, 0x9a, 0x8b, 0x53, 0x2f, 0x70, 0x34, 0xf7, 0x65, 0xfe, 0x85, 0x01, + 0x67, 0xd3, 0x6a, 0x6b, 0xcc, 0x06, 0x63, 0xbf, 0x66, 0xcc, 0x58, 0x66, 0x54, 0x6f, 0x7d, 0xd3, + 0x5c, 0xdb, 0x82, 0xe2, 0xe2, 0x37, 0x1c, 0x5f, 0xf6, 0x31, 0xf2, 0x45, 0x1b, 0x1a, 0x15, 0x27, + 0xb8, 0xcd, 0xef, 0x17, 0x60, 0x39, 0x25, 0xd7, 0x41, 0x9b, 0xf2, 0x56, 0x65, 0x86, 0x0b, 0xc1, + 0xe8, 0x00, 0xd6, 0x6e, 0x56, 0x60, 0x38, 0xea, 0xf7, 0x9f, 0xec, 0x62, 0x30, 0x94, 0xc7, 0x0a, + 0x56, 0x78, 0x4d, 0x92, 0x3f, 0xc1, 0x35, 0xc9, 0x1d, 0x40, 0xe4, 0xe3, 0xa1, 0x47, 0x89, 0xcc, + 0x59, 0x3d, 0x1e, 0xb7, 0x14, 0xb8, 0x0d, 0x46, 0x4f, 0xaf, 0x36, 0x27, 0x38, 0x70, 0x8a, 0x14, + 0x5a, 0x85, 0x4a, 0xd7, 0xf3, 0x6d, 0xc2, 0x7a, 0xc9, 0x3d, 0x97, 0x52, 0xf5, 0xbb, 0x15, 0x12, + 0x70, 0xcc, 0x83, 0xde, 0x8b, 0xab, 0xc2, 0x73, 0x99, 0x2f, 0x33, 0xc5, 0x98, 0xb9, 0xa3, 0x98, + 0x5e, 0x0e, 0x5e, 0x83, 0x45, 0x2e, 0xb0, 0xb6, 0xbd, 0x15, 0xde, 0x37, 0x95, 0xf4, 0xe8, 0xa0, + 0xa9, 0x93, 0x71, 0x92, 0xdf, 0xfc, 0x51, 0x11, 0x96, 0x53, 0x32, 0xfc, 0xe8, 0x8e, 0xcd, 0x78, + 0x92, 0x3b, 0xb6, 0x2f, 0xca, 0x12, 0x5e, 0x82, 0x92, 0xeb, 0xad, 0x5b, 0xf6, 0x1e, 0x91, 0xef, + 0x19, 0xa2, 0x29, 0xba, 0x27, 0x9a, 0x71, 0x48, 0x0f, 0x8d, 0xa6, 0x70, 0x02, 0xa3, 0x99, 0x79, + 0xa1, 0xdf, 0x0a, 0xab, 0x2c, 0x5d, 0xa7, 0x4f, 0x78, 0xac, 0x36, 0x97, 0xd8, 0x99, 0x1a, 0x15, + 0x27, 0xb8, 0xd1, 0xd7, 0xa1, 0x22, 0x96, 0xc7, 0xef, 0xd1, 0x0c, 0xb7, 0x81, 0x51, 0x67, 0x9a, + 0xa1, 0x10, 0x8e, 0xe5, 0xd1, 0x10, 0xce, 0xf3, 0x74, 0x80, 0xf9, 0xeb, 0x81, 0xf3, 0x6d, 0x11, + 0x0f, 0x8a, 0x67, 0x57, 0xa2, 0xce, 0x79, 0xe3, 0x68, 0x5c, 0x3f, 0xbf, 0x95, 0xce, 0xf2, 0x68, + 0x3a, 0x09, 0x4f, 0x83, 0x45, 
0xdf, 0x80, 0xd2, 0x01, 0x8f, 0xa8, 0xc2, 0x9b, 0x89, 0xc6, 0x6c, + 0xd1, 0x71, 0xbc, 0x8a, 0xe2, 0x37, 0xc5, 0x21, 0x9e, 0xf9, 0x7d, 0x03, 0xd2, 0xaf, 0x07, 0xf5, + 0x39, 0x33, 0x9e, 0x70, 0xce, 0x9e, 0x8f, 0xed, 0x4a, 0x94, 0xf3, 0xab, 0x69, 0x36, 0x65, 0xfe, + 0x91, 0x01, 0xcb, 0x29, 0xf5, 0x8d, 0x5f, 0x8e, 0x23, 0xe9, 0xb3, 0x5c, 0xb2, 0x73, 0x9b, 0x07, + 0xc4, 0x0d, 0x4e, 0x76, 0x29, 0xb9, 0x29, 0xae, 0x02, 0x73, 0xb2, 0xaa, 0x9f, 0xa9, 0x38, 0xc1, + 0xeb, 0xc3, 0xfa, 0x1d, 0xe0, 0x13, 0x78, 0xee, 0xe9, 0x77, 0xce, 0x85, 0x2f, 0xfb, 0xce, 0xd9, + 0xfc, 0x2b, 0x03, 0x16, 0xf4, 0xbb, 0x4e, 0xf4, 0x55, 0xc8, 0x8f, 0x7c, 0x47, 0x4e, 0x6a, 0xd4, + 0xfb, 0x77, 0xf0, 0x16, 0x66, 0xed, 0x8c, 0xec, 0x93, 0xae, 0x5c, 0xb1, 0x88, 0x8c, 0x49, 0x17, + 0xb3, 0x76, 0x44, 0xa0, 0x3a, 0xf4, 0xbd, 0x8f, 0x0f, 0xc5, 0x39, 0x3f, 0xc3, 0xfb, 0xec, 0xed, + 0x58, 0x2a, 0x2e, 0x23, 0x2b, 0x8d, 0x58, 0xc5, 0xe5, 0x11, 0xd4, 0x64, 0x71, 0xec, 0x97, 0xc3, + 0x5c, 0xff, 0x2e, 0x07, 0x25, 0x69, 0x34, 0xe8, 0x43, 0x58, 0xe8, 0x69, 0xd3, 0x3b, 0x43, 0xb7, + 0x12, 0x77, 0xd0, 0x91, 0xcb, 0xd5, 0xdb, 0x71, 0x42, 0x01, 0xfa, 0x6d, 0x38, 0xd3, 0x73, 0x02, + 0x7d, 0x4c, 0x33, 0x54, 0x0e, 0x6e, 0x27, 0x65, 0x9b, 0x17, 0xa4, 0xe2, 0x33, 0x13, 0x24, 0x3c, + 0xa9, 0x09, 0xdd, 0x87, 0x82, 0x4f, 0xba, 0xb3, 0x3c, 0x72, 0x62, 0x7b, 0x8a, 0x74, 0xf9, 0x1e, + 0x8b, 0xa2, 0x2f, 0x4c, 0xba, 0x14, 0x73, 0x20, 0xf3, 0x77, 0xc5, 0x52, 0x27, 0x0a, 0x84, 0xff, + 0x13, 0x9f, 0x4c, 0xfc, 0x97, 0x01, 0x10, 0x77, 0xf6, 0xff, 0xdf, 0xda, 0x9a, 0x7f, 0x9e, 0x83, + 0x49, 0x46, 0xb6, 0x2f, 0x6c, 0x91, 0x3d, 0x1a, 0xa9, 0x9f, 0x29, 0x49, 0x2a, 0x7a, 0x08, 0x73, + 0x16, 0xff, 0xce, 0x67, 0x86, 0x1e, 0x0b, 0x55, 0xeb, 0x9e, 0x1b, 0xf8, 0x5e, 0xff, 0x1d, 0x4a, + 0x7c, 0xe5, 0xe3, 0x1a, 0x8e, 0x85, 0x25, 0x26, 0x22, 0x2c, 0x3d, 0x91, 0xdf, 0xea, 0xcc, 0xf0, + 0x4c, 0x7e, 0x52, 0x81, 0x92, 0xaa, 0x48, 0x38, 0x1c, 0x23, 0xcf, 0x70, 0x6f, 0x6d, 0x7e, 0xcf, + 0x80, 0xa5, 0x64, 0x35, 0x9d, 0xc9, 0xf3, 0x60, 0x63, 0x6b, 0x23, 0x79, 0x57, 0xb1, 0x25, 0x9a, + 0x71, 0x48, 0x47, 0x77, 0xa0, 0xc4, 0x82, 0x4e, 0x2c, 0xbd, 0x6d, 0xc6, 0x90, 0x95, 0x9f, 0xef, + 0xb7, 0x84, 0x1c, 0x0e, 0x01, 0xcc, 0x7f, 0x30, 0x00, 0x4d, 0xd6, 0x5b, 0xd1, 0x36, 0x9c, 0x15, + 0x5f, 0x62, 0xc8, 0x47, 0x04, 0x5b, 0x5a, 0xd7, 0x2e, 0xc9, 0xae, 0x9d, 0x6d, 0xa5, 0xf0, 0xe0, + 0x54, 0xc9, 0x28, 0xc8, 0xce, 0x9d, 0x3c, 0xc8, 0x7e, 0x01, 0xe6, 0x86, 0x6c, 0xae, 0x3a, 0x32, + 0x12, 0x8e, 0x56, 0x7c, 0x9b, 0xb7, 0x62, 0x49, 0x35, 0xff, 0x3a, 0x07, 0xb5, 0x69, 0xcf, 0xb0, + 0xbf, 0x80, 0x91, 0x3d, 0xd4, 0x46, 0xf6, 0x46, 0xe6, 0x37, 0x3f, 0x81, 0x4f, 0xac, 0xc1, 0x8e, + 0xd5, 0x3b, 0x3e, 0xc7, 0x1c, 0xc0, 0xa2, 0xa2, 0xf5, 0x84, 0x9f, 0xdc, 0x44, 0x39, 0x52, 0x4b, + 0x87, 0xc2, 0x49, 0x6c, 0xb3, 0x0d, 0x10, 0xbf, 0x23, 0xcd, 0x50, 0x83, 0x7e, 0x0e, 0x8a, 0x07, + 0x56, 0x7f, 0x14, 0x7e, 0xb9, 0x18, 0xbd, 0x06, 0x7f, 0xc0, 0x1a, 0xb1, 0xa0, 0x99, 0x7f, 0x9c, + 0x83, 0xaa, 0xf2, 0xce, 0xe9, 0x69, 0xa5, 0xdf, 0xcf, 0x40, 0xce, 0xa2, 0x3c, 0xdd, 0xa9, 0x88, + 0x8b, 0xe9, 0x35, 0x8a, 0x73, 0x16, 0x45, 0xef, 0x42, 0x71, 0x68, 0x05, 0x7b, 0xe1, 0x5b, 0xf6, + 0xab, 0xb3, 0xbd, 0xc2, 0x62, 0xe9, 0x49, 0x3c, 0x0e, 0xf6, 0x8b, 0x62, 0x81, 0x97, 0xc8, 0xf2, + 0xf2, 0x4f, 0x2f, 0xcb, 0x33, 0xbf, 0x6b, 0xc0, 0x62, 0xa2, 0x0f, 0xe8, 0x2a, 0x00, 0x8d, 0x7e, + 0xc9, 0x25, 0x88, 0x0a, 0x69, 0x31, 0x1f, 0x56, 0xb8, 0x9e, 0xb8, 0x60, 0xd2, 0x87, 0xf3, 0x53, + 0x8c, 0x93, 0xa5, 0x88, 0x6c, 0xc5, 0xe9, 0xd0, 0xb2, 0x49, 0xf2, 0xc9, 0xfe, 0xbd, 0x90, 0x80, + 0x63, 0x9e, 0xc8, 0x78, 0x72, 0xd3, 0x8c, 0xc7, 0xfc, 
0x47, 0x03, 0x2e, 0x1d, 0x77, 0x19, 0xcc, + 0x92, 0x7e, 0x79, 0xe3, 0x1b, 0xa5, 0x99, 0x89, 0x2b, 0x81, 0x3b, 0x3a, 0x19, 0x27, 0xf9, 0xd1, + 0x75, 0xa8, 0x2a, 0x4d, 0xb2, 0x33, 0x51, 0x1c, 0xa9, 0x88, 0x63, 0x95, 0xef, 0x09, 0xc2, 0x78, + 0xf3, 0x6f, 0x0d, 0x38, 0x9b, 0x56, 0x39, 0x44, 0xbd, 0xf0, 0x1b, 0x0b, 0x91, 0xbb, 0x35, 0x4f, + 0x58, 0x81, 0x6c, 0xf0, 0x2f, 0x2d, 0x36, 0xdd, 0xc0, 0x3f, 0x4c, 0xff, 0xfa, 0xe2, 0xe2, 0x4d, + 0x80, 0x98, 0x07, 0x2d, 0x41, 0x7e, 0x9f, 0x1c, 0x8a, 0x89, 0xc3, 0xec, 0x4f, 0x74, 0x56, 0xdb, + 0xb4, 0x72, 0x97, 0xbe, 0x91, 0xbb, 0x69, 0xbc, 0x51, 0xfe, 0x83, 0x3f, 0xa9, 0x9f, 0xfa, 0xce, + 0x2f, 0x2e, 0x9f, 0x32, 0x7f, 0x60, 0x80, 0x1a, 0x65, 0xa3, 0x97, 0xa1, 0xb2, 0x17, 0x04, 0x43, + 0xde, 0x24, 0x9f, 0x74, 0xf1, 0x2b, 0x89, 0xb7, 0x77, 0x76, 0xb6, 0x79, 0x23, 0x8e, 0xe9, 0xa8, + 0x01, 0xc0, 0x7e, 0x50, 0xc1, 0x5d, 0x88, 0x9f, 0x61, 0x32, 0xee, 0xb6, 0x60, 0x57, 0x38, 0x44, + 0x32, 0x2a, 0x98, 0xc5, 0xa7, 0x7b, 0x32, 0x19, 0x15, 0x9c, 0x21, 0xcd, 0xfc, 0x33, 0x03, 0xce, + 0x4c, 0x3c, 0x21, 0x44, 0xdb, 0x51, 0xf8, 0x3d, 0x6b, 0xf1, 0x71, 0x4a, 0xa0, 0xfe, 0xc4, 0xbb, + 0xe8, 0x26, 0x9c, 0x15, 0x88, 0x5c, 0x6b, 0xbc, 0x85, 0x1e, 0xeb, 0x4e, 0xcd, 0x3f, 0x35, 0x00, + 0xe2, 0x72, 0x18, 0xda, 0x85, 0x79, 0xd1, 0x25, 0x2d, 0x8e, 0xcc, 0x3e, 0xc0, 0xb3, 0x52, 0xc5, + 0x7c, 0x5b, 0x41, 0xc1, 0x1a, 0x26, 0xdb, 0xd7, 0xbc, 0x0a, 0xcd, 0x77, 0x57, 0x4e, 0xdf, 0xd7, + 0x77, 0x43, 0x02, 0x8e, 0x79, 0xcc, 0x9f, 0xe7, 0x61, 0x39, 0xe5, 0xd1, 0xca, 0xff, 0xe9, 0xa2, + 0xea, 0x4b, 0x50, 0x12, 0xdf, 0x31, 0xd0, 0x64, 0x74, 0x27, 0x3e, 0x73, 0xa0, 0x38, 0xa4, 0xa3, + 0x2b, 0x50, 0x75, 0x5c, 0x5b, 0xdc, 0xb1, 0x58, 0x61, 0x31, 0x4d, 0xdc, 0x5f, 0xc7, 0xcd, 0x58, + 0xe5, 0xd1, 0xab, 0x6f, 0x73, 0x19, 0xaa, 0x6f, 0x5f, 0x60, 0xf9, 0xe9, 0x9b, 0x70, 0x66, 0x22, + 0xf4, 0xcd, 0x16, 0x07, 0x10, 0xfe, 0xf9, 0x7c, 0x22, 0x0e, 0x10, 0x5f, 0xcd, 0x0b, 0x9a, 0xf9, + 0x43, 0x03, 0x16, 0x12, 0x39, 0xc2, 0x89, 0x4a, 0x35, 0xf7, 0xd5, 0x52, 0xcd, 0xc9, 0xf2, 0x1b, + 0xad, 0x68, 0x63, 0xde, 0x81, 0xf4, 0x57, 0xf0, 0xc9, 0xc5, 0x34, 0x1e, 0xbf, 0x98, 0xe6, 0x4f, + 0x72, 0x50, 0x89, 0x1e, 0x0f, 0xa2, 0x57, 0xb5, 0x99, 0xbb, 0xa0, 0xce, 0xdc, 0xa3, 0x71, 0x5d, + 0x30, 0x2a, 0xd3, 0xf8, 0x3e, 0x54, 0xa2, 0xc7, 0xa7, 0x51, 0x29, 0x2a, 0x7b, 0x9c, 0x17, 0x59, + 0x4d, 0xf4, 0xa2, 0x15, 0xc7, 0x78, 0x2c, 0xf4, 0x0d, 0x5f, 0x87, 0xde, 0x75, 0xfa, 0x7d, 0x87, + 0xca, 0x0b, 0xb6, 0x3c, 0xbf, 0x60, 0x8b, 0x42, 0xdf, 0x8d, 0x14, 0x1e, 0x9c, 0x2a, 0x89, 0xb6, + 0xa1, 0x48, 0x03, 0x32, 0xa4, 0xb2, 0xe6, 0xfc, 0x72, 0xa6, 0x77, 0x95, 0x64, 0xc8, 0x53, 0xfa, + 0xc8, 0x44, 0x58, 0x0b, 0xc5, 0x02, 0xc8, 0xfc, 0x37, 0x03, 0xca, 0x21, 0x0b, 0x7a, 0x45, 0x9b, + 0xbc, 0x5a, 0x62, 0xf2, 0x38, 0xdf, 0xff, 0xda, 0xb9, 0x33, 0xc7, 0x06, 0x2c, 0xe8, 0x6f, 0x44, + 0x94, 0x42, 0x92, 0x71, 0x5c, 0x21, 0x09, 0xbd, 0x02, 0x65, 0xab, 0xdf, 0xf7, 0x3e, 0xda, 0x74, + 0x0f, 0x64, 0xf1, 0x36, 0xba, 0x7b, 0x5e, 0x93, 0xed, 0x38, 0xe2, 0x40, 0x07, 0xb0, 0x28, 0xe4, + 0xe2, 0xd7, 0xbf, 0xf9, 0xcc, 0x57, 0xa0, 0x69, 0xe7, 0x58, 0x73, 0x99, 0x45, 0x5e, 0x6d, 0x1d, + 0x13, 0x27, 0x95, 0x34, 0x6f, 0x7f, 0xfa, 0xf9, 0xca, 0xa9, 0x9f, 0x7d, 0xbe, 0x72, 0xea, 0xb3, + 0xcf, 0x57, 0x4e, 0x7d, 0xe7, 0x68, 0xc5, 0xf8, 0xf4, 0x68, 0xc5, 0xf8, 0xd9, 0xd1, 0x8a, 0xf1, + 0xd9, 0xd1, 0x8a, 0xf1, 0x2f, 0x47, 0x2b, 0xc6, 0xef, 0xff, 0xeb, 0xca, 0xa9, 0x6f, 0x3e, 0xfb, + 0xd8, 0x7f, 0x49, 0xf3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x88, 0x7d, 0x3c, 0xce, 0xb6, 0x46, + 0x00, 0x00, +} + +func (m *BinaryBuildRequestOptions) Marshal() (dAtA []byte, 
err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BinaryBuildRequestOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BinaryBuildRequestOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.CommitterEmail)
+	copy(dAtA[i:], m.CommitterEmail)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommitterEmail)))
+	i--
+	dAtA[i] = 0x42
+	i -= len(m.CommitterName)
+	copy(dAtA[i:], m.CommitterName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommitterName)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.AuthorEmail)
+	copy(dAtA[i:], m.AuthorEmail)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorEmail)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.AuthorName)
+	copy(dAtA[i:], m.AuthorName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorName)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Commit)
+	copy(dAtA[i:], m.Commit)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Commit)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.AsFile)
+	copy(dAtA[i:], m.AsFile)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AsFile)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BinaryBuildSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BinaryBuildSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BinaryBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.AsFile)
+	copy(dAtA[i:], m.AsFile)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AsFile)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BitbucketWebHookCause) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BitbucketWebHookCause) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BitbucketWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.CommonWebHookCause.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Build) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Build) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Build) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfigList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfigSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.FailedBuildsHistoryLimit != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedBuildsHistoryLimit))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.SuccessfulBuildsHistoryLimit != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulBuildsHistoryLimit))
+		i--
+		dAtA[i] = 0x20
+	}
+	{
+		size, err := m.CommonSpec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.RunPolicy)
+	copy(dAtA[i:], m.RunPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RunPolicy)))
+	i--
+	dAtA[i] = 0x12
+	if len(m.Triggers) > 0 {
+		for iNdEx := len(m.Triggers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Triggers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ImageChangeTriggers) > 0 {
+		for iNdEx := len(m.ImageChangeTriggers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ImageChangeTriggers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.LastVersion))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildLog) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildLog) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildLog) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildLogOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildLogOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.InsecureSkipTLSVerifyBackend {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x58
+	if m.Version != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Version))
+		i--
+		dAtA[i] = 0x50
+	}
+	i--
+	if m.NoWait {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x48
+	if m.LimitBytes != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes))
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.TailLines != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines))
+		i--
+		dAtA[i] = 0x38
+	}
+	i--
+	if m.Timestamps {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	if m.SinceTime != nil {
+		{
+			size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.SinceSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds))
+		i--
+		dAtA[i] = 0x20
+	}
+	i--
+	if m.Previous {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i--
+	if m.Follow {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Container)
+	copy(dAtA[i:], m.Container)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildOutput) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildOutput) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildOutput) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ImageLabels) > 0 {
+		for iNdEx := len(m.ImageLabels) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ImageLabels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.PushSecret != nil {
+		{
+			size, err := m.PushSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.To != nil {
+		{
+			size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildPostCommitSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildPostCommitSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildPostCommitSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Script)
+	copy(dAtA[i:], m.Script)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Script)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Args) > 0 {
+		for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Args[iNdEx])
+			copy(dAtA[i:], m.Args[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Command) > 0 {
+		for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Command[iNdEx])
+			copy(dAtA[i:], m.Command[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SourceStrategyOptions != nil {
+		{
+			size, err := m.SourceStrategyOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.DockerStrategyOptions != nil {
+		{
+			size, err := m.DockerStrategyOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if len(m.TriggeredBy) > 0 {
+		for iNdEx := len(m.TriggeredBy) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.TriggeredBy[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if len(m.Env) > 0 {
+		for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if m.LastVersion != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.LastVersion))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.Binary != nil {
+		{
+			size, err := m.Binary.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.From != nil {
+		{
+			size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.TriggeredByImage != nil {
+		{
+			size, err := m.TriggeredByImage.MarshalToSizedBuffer(dAtA[:i])
+			if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConfigMaps) > 0 { + for iNdEx := len(m.ConfigMaps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConfigMaps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.SourceSecret != nil { + { + size, err := m.SourceSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.ContextDir) + copy(dAtA[i:], m.ContextDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContextDir))) + i-- + dAtA[i] = 0x32 + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Git != nil { + { + size, err := m.Git.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Dockerfile != nil { + i -= len(*m.Dockerfile) + copy(dAtA[i:], *m.Dockerfile) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Dockerfile))) + i-- + dAtA[i] = 0x1a + } + if m.Binary != nil { + { + size, err := m.Binary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TriggeredBy) > 0 { + for iNdEx := len(m.TriggeredBy) - 1; 
iNdEx >= 0; iNdEx-- { + { + size, err := m.TriggeredBy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.CommonSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + i -= len(m.LogSnippet) + copy(dAtA[i:], m.LogSnippet) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LogSnippet))) + i-- + dAtA[i] = 0x62 + if len(m.Stages) > 0 { + for iNdEx := len(m.Stages) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stages[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + { + size, err := m.Output.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + i -= len(m.OutputDockerImageReference) + copy(dAtA[i:], m.OutputDockerImageReference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OutputDockerImageReference))) + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x38 + if m.CompletionTimestamp != nil { + { + size, err := m.CompletionTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.StartTimestamp != nil { + { + size, err := m.StartTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i-- + if m.Cancelled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildStatusOutput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*BuildStatusOutput) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStatusOutput) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.To != nil { + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BuildStatusOutputTo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildStatusOutputTo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStatusOutputTo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ImageDigest) + copy(dAtA[i:], m.ImageDigest) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageDigest))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.JenkinsPipelineStrategy != nil { + { + size, err := m.JenkinsPipelineStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.CustomStrategy != nil { + { + size, err := m.CustomStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.SourceStrategy != nil { + { + size, err := m.SourceStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.DockerStrategy != nil { + { + size, err := m.DockerStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildTriggerCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildTriggerCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildTriggerCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BitbucketWebHook != nil { + { + size, err := m.BitbucketWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.GitLabWebHook != nil { + { + size, err := m.GitLabWebHook.MarshalToSizedBuffer(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.ImageChangeBuild != nil { + { + size, err := m.ImageChangeBuild.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.GitHubWebHook != nil { + { + size, err := m.GitHubWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.GenericWebHook != nil { + { + size, err := m.GenericWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildTriggerPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildTriggerPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildTriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BitbucketWebHook != nil { + { + size, err := m.BitbucketWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.GitLabWebHook != nil { + { + size, err := m.GitLabWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.ImageChange != nil { + { + size, err := m.ImageChange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.GenericWebHook != nil { + { + size, err := m.GenericWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.GitHubWebHook != nil { + { + size, err := m.GitHubWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Mounts) > 0 { + for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := 
m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildVolumeMount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVolumeMount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildVolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationPath) + copy(dAtA[i:], m.DestinationPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CommonSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommonSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MountTrustedCA != nil { + i-- + if *m.MountTrustedCA { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.CompletionDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CompletionDeadlineSeconds)) + i-- + dAtA[i] = 0x40 + } + { + size, err := m.PostCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.Output.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceAccount) + copy(dAtA[i:], m.ServiceAccount) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccount))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CommonWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommonWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConfigMapBuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapBuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationDir) + copy(dAtA[i:], m.DestinationDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.BuildAPIVersion) + copy(dAtA[i:], m.BuildAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BuildAPIVersion))) + i-- + dAtA[i] = 0x3a + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i-- + if m.ForcePull { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + i-- + if m.ExposeDockerSocket { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DockerBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DockerBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DockerBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.ImageOptimizationPolicy != nil { + i -= len(*m.ImageOptimizationPolicy) + copy(dAtA[i:], *m.ImageOptimizationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ImageOptimizationPolicy))) + i-- + dAtA[i] = 0x42 + } + if len(m.BuildArgs) > 0 { + for iNdEx := len(m.BuildArgs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BuildArgs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.DockerfilePath) + copy(dAtA[i:], m.DockerfilePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerfilePath))) + i-- + dAtA[i] = 0x32 + i-- + if m.ForcePull { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i-- + if m.NoCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.From != nil { + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DockerStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DockerStrategyOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DockerStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.NoCache != nil {
+		i--
+		if *m.NoCache {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.BuildArgs) > 0 {
+		for iNdEx := len(m.BuildArgs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.BuildArgs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *GenericWebHookCause) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GenericWebHookCause) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenericWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Secret)
+	copy(dAtA[i:], m.Secret)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret)))
+	i--
+	dAtA[i] = 0x12
+	if m.Revision != nil {
+		{
+			size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *GenericWebHookEvent) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GenericWebHookEvent) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenericWebHookEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.DockerStrategyOptions != nil {
+		{
+			size, err := m.DockerStrategyOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.Env) > 0 {
+		for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Git != nil {
+		{
+			size, err := m.Git.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GitBuildSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GitBuildSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.ProxyConfig.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Ref)
+	copy(dAtA[i:], m.Ref)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Ref)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.URI)
+	copy(dAtA[i:], m.URI)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.URI)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GitHubWebHookCause) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GitHubWebHookCause) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitHubWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Secret)
+	copy(dAtA[i:], m.Secret)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret)))
+	i--
+	dAtA[i] = 0x12
+	if m.Revision != nil {
+		{
+			size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *GitInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GitInfo) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Refs) > 0 {
+		for iNdEx := len(m.Refs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Refs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	{
+		size, err := m.GitSourceRevision.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.GitBuildSource.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GitLabWebHookCause) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GitLabWebHookCause) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitLabWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.CommonWebHookCause.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GitRefInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GitRefInfo) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitRefInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.GitSourceRevision.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.GitBuildSource.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GitSourceRevision) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GitSourceRevision) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitSourceRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.Committer.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Author.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Commit)
+	copy(dAtA[i:], m.Commit)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Commit)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageChangeCause) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageChangeCause) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageChangeCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.FromRef != nil {
+		{
+			size, err := m.FromRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.ImageID)
+	copy(dAtA[i:], m.ImageID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageChangeTrigger) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageChangeTrigger) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageChangeTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.Paused {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	if m.From != nil {
+		{
+			size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.LastTriggeredImageID)
+	copy(dAtA[i:], m.LastTriggeredImageID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImageID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageChangeTriggerStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageChangeTriggerStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageChangeTriggerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.LastTriggerTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.LastTriggeredImageID)
+	copy(dAtA[i:], m.LastTriggeredImageID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImageID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageLabel) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageLabel) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageLabel) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.As) > 0 {
+		for iNdEx := len(m.As) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.As[iNdEx])
+			copy(dAtA[i:], m.As[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.As[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if m.PullSecret != nil {
+		{
+			size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Paths) > 0 {
+		for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageSourcePath) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageSourcePath) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageSourcePath) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.DestinationDir)
+	copy(dAtA[i:], m.DestinationDir)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.SourcePath)
+	copy(dAtA[i:], m.SourcePath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SourcePath)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamTagReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamTagReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamTagReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *JenkinsPipelineBuildStrategy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *JenkinsPipelineBuildStrategy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *JenkinsPipelineBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Env) > 0 {
+		for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.Jenkinsfile)
+	copy(dAtA[i:], m.Jenkinsfile)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Jenkinsfile)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.JenkinsfilePath)
+	copy(dAtA[i:], m.JenkinsfilePath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.JenkinsfilePath)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m OptionalNodeSelector) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m OptionalNodeSelector) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m OptionalNodeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m) > 0 {
+		keysForItems := make([]string, 0, len(m))
+		for k := range m {
+			keysForItems = append(keysForItems, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForItems)
+		for iNdEx := len(keysForItems) - 1; iNdEx >= 0; iNdEx-- {
+			v := m[string(keysForItems[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForItems[iNdEx])
+			copy(dAtA[i:], keysForItems[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForItems[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ProxyConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ProxyConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProxyConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.NoProxy != nil {
+		i -= len(*m.NoProxy)
+		copy(dAtA[i:], *m.NoProxy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NoProxy)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.HTTPSProxy != nil {
+		i -= len(*m.HTTPSProxy)
+		copy(dAtA[i:], *m.HTTPSProxy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HTTPSProxy)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.HTTPProxy != nil {
+		i -= len(*m.HTTPProxy)
+		copy(dAtA[i:], *m.HTTPProxy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HTTPProxy)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SecretBuildSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecretBuildSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.DestinationDir)
+	copy(dAtA[i:], m.DestinationDir)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SecretLocalReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecretLocalReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretLocalReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SecretSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecretSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.MountPath)
+	copy(dAtA[i:], m.MountPath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.SecretSource.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SourceBuildStrategy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SourceBuildStrategy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SourceBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Volumes) > 0 {
+		for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x4a
+		}
+	}
+	i--
+	if m.ForcePull {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	if m.Incremental != nil {
+		i--
+		if *m.Incremental {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	i -= len(m.Scripts)
+	copy(dAtA[i:], m.Scripts)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scripts)))
+	i--
+	dAtA[i] = 0x22
+	if len(m.Env) > 0 {
+		for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.PullSecret != nil {
+		{
+			size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SourceControlUser) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SourceControlUser) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SourceControlUser) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Email)
+	copy(dAtA[i:], m.Email)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Email)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SourceRevision) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SourceRevision) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SourceRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Git != nil {
+		{
+			size, err := m.Git.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SourceStrategyOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SourceStrategyOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SourceStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Incremental != nil {
+		i--
+		if *m.Incremental {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *StageInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StageInfo) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StageInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Steps) > 0 {
+		for iNdEx := len(m.Steps) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Steps[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.DurationMilliseconds))
+	i--
+	dAtA[i] = 0x18
+	{
+		size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *StepInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StepInfo) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StepInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.DurationMilliseconds))
+	i--
+	dAtA[i] = 0x18
+	{
+		size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *WebHookTrigger) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WebHookTrigger) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WebHookTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SecretReference != nil {
+		{
+			size, err := m.SecretReference.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i--
+	if m.AllowEnv {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Secret)
+	copy(dAtA[i:], m.Secret)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *BinaryBuildRequestOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.AsFile)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Commit)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.AuthorName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.AuthorEmail)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.CommitterName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.CommitterEmail)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *BinaryBuildSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.AsFile)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *BitbucketWebHookCause) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.CommonWebHookCause.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Build) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *BuildCondition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastTransitionTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastUpdateTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *BuildConfig) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *BuildConfigList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *BuildConfigSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Triggers) > 0 {
+		for _, e := range m.Triggers {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.RunPolicy)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.CommonSpec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SuccessfulBuildsHistoryLimit != nil {
+		n += 1 + sovGenerated(uint64(*m.SuccessfulBuildsHistoryLimit))
+	}
+	if m.FailedBuildsHistoryLimit != nil {
+		n += 1 + sovGenerated(uint64(*m.FailedBuildsHistoryLimit))
+	}
+	return n
+}
+
+func (m *BuildConfigStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.LastVersion))
+	if len(m.ImageChangeTriggers) > 0 {
+		for _, e := range m.ImageChangeTriggers {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *BuildList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *BuildLog) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *BuildLogOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Container)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	n += 2
+	if m.SinceSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.SinceSeconds))
+	}
+	if m.SinceTime != nil {
+		l = m.SinceTime.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if m.TailLines != nil {
+		n += 1 + sovGenerated(uint64(*m.TailLines))
+	}
+	if m.LimitBytes != nil {
+		n += 1 + sovGenerated(uint64(*m.LimitBytes))
+	}
+	n += 2
+	if m.Version != nil {
+		n += 1 + sovGenerated(uint64(*m.Version))
+	}
+	n += 2
+	return n
+}
+
+func (m *BuildOutput) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.To != nil {
+		l = m.To.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.PushSecret != nil {
+		l = m.PushSecret.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.ImageLabels) > 0 {
+		for _, e := range m.ImageLabels {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *BuildPostCommitSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Command) > 0 {
+		for _, s := range m.Command {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Args) > 0 {
+		for _, s := range m.Args {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.Script)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *BuildRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Revision != nil {
+		l = m.Revision.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.TriggeredByImage != nil {
+		l = m.TriggeredByImage.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.From != nil {
+		l = m.From.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Binary != nil {
+		l = m.Binary.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.LastVersion != nil {
+		n += 1 + sovGenerated(uint64(*m.LastVersion))
+	}
+	if len(m.Env) > 0 {
+		for _, e := range m.Env {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.TriggeredBy) > 0 {
+		for _, e := range m.TriggeredBy {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.DockerStrategyOptions != nil {
+		l = m.DockerStrategyOptions.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.SourceStrategyOptions != nil {
+		l = m.SourceStrategyOptions.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *BuildSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Binary != nil {
+		l = m.Binary.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Dockerfile != nil {
+		l = len(*m.Dockerfile)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Git != nil {
+		l = m.Git.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Images) > 0 {
+		for _, e := range m.Images {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.ContextDir)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SourceSecret != nil {
+		l = 
m.SourceSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ConfigMaps) > 0 { + for _, e := range m.ConfigMaps { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.CommonSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.TriggeredBy) > 0 { + for _, e := range m.TriggeredBy { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTimestamp != nil { + l = m.StartTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTimestamp != nil { + l = m.CompletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Duration)) + l = len(m.OutputDockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Output.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Stages) > 0 { + for _, e := range m.Stages { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.LogSnippet) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildStatusOutput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.To != nil { + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildStatusOutputTo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ImageDigest) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.DockerStrategy != nil { + l = m.DockerStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SourceStrategy != nil { + l = m.SourceStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CustomStrategy != nil { + l = m.CustomStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.JenkinsPipelineStrategy != nil { + l = m.JenkinsPipelineStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildTriggerCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.GenericWebHook != nil { + l = m.GenericWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitHubWebHook != nil { + l = m.GitHubWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageChangeBuild != nil { + l = m.ImageChangeBuild.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitLabWebHook != nil { + l = m.GitLabWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.BitbucketWebHook != nil { + l = m.BitbucketWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildTriggerPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if 
m.GitHubWebHook != nil { + l = m.GitHubWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GenericWebHook != nil { + l = m.GenericWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageChange != nil { + l = m.ImageChange.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitLabWebHook != nil { + l = m.GitLabWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.BitbucketWebHook != nil { + l = m.BitbucketWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildVolumeMount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DestinationPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CommonSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Output.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.PostCommit.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.CompletionDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.CompletionDeadlineSeconds)) + } + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MountTrustedCA != nil { + n += 2 + } + return n +} + +func (m *CommonWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigMapBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.BuildAPIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DockerBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.From != nil { + l = m.From.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + } + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + l = len(m.DockerfilePath) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.BuildArgs) > 0 { + for _, e := range m.BuildArgs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ImageOptimizationPolicy != nil { + l = len(*m.ImageOptimizationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DockerStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BuildArgs) > 0 { + for _, e := range m.BuildArgs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.NoCache != nil { + n += 2 + } + return n +} + +func (m *GenericWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GenericWebHookEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DockerStrategyOptions != nil { + l = m.DockerStrategyOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *GitBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.URI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Ref) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ProxyConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitHubWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.GitBuildSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.GitSourceRevision.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Refs) > 0 { + for _, e := range m.Refs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GitLabWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.CommonWebHookCause.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitRefInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.GitBuildSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.GitSourceRevision.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitSourceRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Commit) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Author.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Committer.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageChangeCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + if m.FromRef != nil { + l = m.FromRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageChangeTrigger) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LastTriggeredImageID) + n += 1 + l + sovGenerated(uint64(l)) + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *ImageChangeTriggerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LastTriggeredImageID) + n += 1 + l + sovGenerated(uint64(l)) + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTriggerTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageLabel) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.As) > 0 { + for _, s := range m.As { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageSourcePath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SourcePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamTagReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JenkinsPipelineBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.JenkinsfilePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Jenkinsfile) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m OptionalNodeSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for k, v := range m { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ProxyConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HTTPProxy != nil { + l = len(*m.HTTPProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPSProxy != nil { + l = len(*m.HTTPSProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NoProxy != nil { + l = len(*m.NoProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SecretBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretLocalReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l 
= m.SecretSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SourceBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Scripts) + n += 1 + l + sovGenerated(uint64(l)) + if m.Incremental != nil { + n += 2 + } + n += 2 + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SourceControlUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Email) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SourceRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SourceStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Incremental != nil { + n += 2 + } + return n +} + +func (m *StageInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DurationMilliseconds)) + if len(m.Steps) > 0 { + for _, e := range m.Steps { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StepInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DurationMilliseconds)) + return n +} + +func (m *WebHookTrigger) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretReference != nil { + l = m.SecretReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BinaryBuildRequestOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BinaryBuildRequestOptions{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`, + `Commit:` + fmt.Sprintf("%v", this.Commit) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `AuthorName:` + fmt.Sprintf("%v", this.AuthorName) + `,`, + `AuthorEmail:` + fmt.Sprintf("%v", this.AuthorEmail) + `,`, + `CommitterName:` + fmt.Sprintf("%v", this.CommitterName) + `,`, + `CommitterEmail:` + fmt.Sprintf("%v", this.CommitterEmail) + `,`, + `}`, + }, "") + return s +} +func (this *BinaryBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BinaryBuildSource{`, + `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`, + `}`, + }, "") + return s +} +func (this *BitbucketWebHookCause) String() 
string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BitbucketWebHookCause{`, + `CommonWebHookCause:` + strings.Replace(strings.Replace(this.CommonWebHookCause.String(), "CommonWebHookCause", "CommonWebHookCause", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Build) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Build{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildSpec", "BuildSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildStatus", "BuildStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildConfig{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildConfigSpec", "BuildConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildConfigStatus", "BuildConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]BuildConfig{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BuildConfig", "BuildConfig", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BuildConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTriggers := "[]BuildTriggerPolicy{" + for _, f := range this.Triggers { + repeatedStringForTriggers += strings.Replace(strings.Replace(f.String(), "BuildTriggerPolicy", "BuildTriggerPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggers += "}" + s := strings.Join([]string{`&BuildConfigSpec{`, + `Triggers:` + repeatedStringForTriggers + `,`, + `RunPolicy:` + fmt.Sprintf("%v", this.RunPolicy) + `,`, + `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`, + `SuccessfulBuildsHistoryLimit:` + valueToStringGenerated(this.SuccessfulBuildsHistoryLimit) + `,`, + `FailedBuildsHistoryLimit:` + valueToStringGenerated(this.FailedBuildsHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigStatus) String() string { + if this == nil { + return "nil" + } + 
repeatedStringForImageChangeTriggers := "[]ImageChangeTriggerStatus{" + for _, f := range this.ImageChangeTriggers { + repeatedStringForImageChangeTriggers += strings.Replace(strings.Replace(f.String(), "ImageChangeTriggerStatus", "ImageChangeTriggerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForImageChangeTriggers += "}" + s := strings.Join([]string{`&BuildConfigStatus{`, + `LastVersion:` + fmt.Sprintf("%v", this.LastVersion) + `,`, + `ImageChangeTriggers:` + repeatedStringForImageChangeTriggers + `,`, + `}`, + }, "") + return s +} +func (this *BuildList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Build{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Build", "Build", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BuildList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BuildLog) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildLog{`, + `}`, + }, "") + return s +} +func (this *BuildLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `Version:` + valueToStringGenerated(this.Version) + `,`, + `InsecureSkipTLSVerifyBackend:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerifyBackend) + `,`, + `}`, + }, "") + return s +} +func (this *BuildOutput) String() string { + if this == nil { + return "nil" + } + repeatedStringForImageLabels := "[]ImageLabel{" + for _, f := range this.ImageLabels { + repeatedStringForImageLabels += strings.Replace(strings.Replace(f.String(), "ImageLabel", "ImageLabel", 1), `&`, ``, 1) + "," + } + repeatedStringForImageLabels += "}" + s := strings.Join([]string{`&BuildOutput{`, + `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `PushSecret:` + strings.Replace(fmt.Sprintf("%v", this.PushSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `ImageLabels:` + repeatedStringForImageLabels + `,`, + `}`, + }, "") + return s +} +func (this *BuildPostCommitSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildPostCommitSpec{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Script:` + fmt.Sprintf("%v", this.Script) + `,`, + `}`, + }, "") + return s +} +func (this *BuildRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForTriggeredBy := "[]BuildTriggerCause{" + for _, f := range this.TriggeredBy { + repeatedStringForTriggeredBy += 
strings.Replace(strings.Replace(f.String(), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggeredBy += "}" + s := strings.Join([]string{`&BuildRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `TriggeredByImage:` + strings.Replace(fmt.Sprintf("%v", this.TriggeredByImage), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Binary:` + strings.Replace(this.Binary.String(), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`, + `LastVersion:` + valueToStringGenerated(this.LastVersion) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `TriggeredBy:` + repeatedStringForTriggeredBy + `,`, + `DockerStrategyOptions:` + strings.Replace(this.DockerStrategyOptions.String(), "DockerStrategyOptions", "DockerStrategyOptions", 1) + `,`, + `SourceStrategyOptions:` + strings.Replace(this.SourceStrategyOptions.String(), "SourceStrategyOptions", "SourceStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageSource{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageSource", "ImageSource", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + repeatedStringForSecrets := "[]SecretBuildSource{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "SecretBuildSource", "SecretBuildSource", 1), `&`, ``, 1) + "," + } + repeatedStringForSecrets += "}" + repeatedStringForConfigMaps := "[]ConfigMapBuildSource{" + for _, f := range this.ConfigMaps { + repeatedStringForConfigMaps += strings.Replace(strings.Replace(f.String(), "ConfigMapBuildSource", "ConfigMapBuildSource", 1), `&`, ``, 1) + "," + } + repeatedStringForConfigMaps += "}" + s := strings.Join([]string{`&BuildSource{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Binary:` + strings.Replace(this.Binary.String(), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`, + `Dockerfile:` + valueToStringGenerated(this.Dockerfile) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitBuildSource", "GitBuildSource", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `ContextDir:` + fmt.Sprintf("%v", this.ContextDir) + `,`, + `SourceSecret:` + strings.Replace(fmt.Sprintf("%v", this.SourceSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `ConfigMaps:` + repeatedStringForConfigMaps + `,`, + `}`, + }, "") + return s +} +func (this *BuildSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTriggeredBy := "[]BuildTriggerCause{" + for _, f := range this.TriggeredBy { + repeatedStringForTriggeredBy += strings.Replace(strings.Replace(f.String(), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggeredBy += "}" + s := strings.Join([]string{`&BuildSpec{`, + `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`, + `TriggeredBy:` + repeatedStringForTriggeredBy + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatus) String() string { + if this == nil { + return "nil" 
+ } + repeatedStringForStages := "[]StageInfo{" + for _, f := range this.Stages { + repeatedStringForStages += strings.Replace(strings.Replace(f.String(), "StageInfo", "StageInfo", 1), `&`, ``, 1) + "," + } + repeatedStringForStages += "}" + repeatedStringForConditions := "[]BuildCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "BuildCondition", "BuildCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&BuildStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Cancelled:` + fmt.Sprintf("%v", this.Cancelled) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StartTimestamp), "Time", "v1.Time", 1) + `,`, + `CompletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTimestamp), "Time", "v1.Time", 1) + `,`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `OutputDockerImageReference:` + fmt.Sprintf("%v", this.OutputDockerImageReference) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Output:` + strings.Replace(strings.Replace(this.Output.String(), "BuildStatusOutput", "BuildStatusOutput", 1), `&`, ``, 1) + `,`, + `Stages:` + repeatedStringForStages + `,`, + `LogSnippet:` + fmt.Sprintf("%v", this.LogSnippet) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatusOutput) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStatusOutput{`, + `To:` + strings.Replace(this.To.String(), "BuildStatusOutputTo", "BuildStatusOutputTo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatusOutputTo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStatusOutputTo{`, + `ImageDigest:` + fmt.Sprintf("%v", this.ImageDigest) + `,`, + `}`, + }, "") + return s +} +func (this *BuildStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `DockerStrategy:` + strings.Replace(this.DockerStrategy.String(), "DockerBuildStrategy", "DockerBuildStrategy", 1) + `,`, + `SourceStrategy:` + strings.Replace(this.SourceStrategy.String(), "SourceBuildStrategy", "SourceBuildStrategy", 1) + `,`, + `CustomStrategy:` + strings.Replace(this.CustomStrategy.String(), "CustomBuildStrategy", "CustomBuildStrategy", 1) + `,`, + `JenkinsPipelineStrategy:` + strings.Replace(this.JenkinsPipelineStrategy.String(), "JenkinsPipelineBuildStrategy", "JenkinsPipelineBuildStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildTriggerCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildTriggerCause{`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `GenericWebHook:` + strings.Replace(this.GenericWebHook.String(), "GenericWebHookCause", "GenericWebHookCause", 1) + `,`, + `GitHubWebHook:` + strings.Replace(this.GitHubWebHook.String(), "GitHubWebHookCause", "GitHubWebHookCause", 1) + `,`, + `ImageChangeBuild:` + strings.Replace(this.ImageChangeBuild.String(), "ImageChangeCause", "ImageChangeCause", 1) + `,`, + `GitLabWebHook:` + strings.Replace(this.GitLabWebHook.String(), "GitLabWebHookCause", "GitLabWebHookCause", 1) + `,`, + `BitbucketWebHook:` + 
strings.Replace(this.BitbucketWebHook.String(), "BitbucketWebHookCause", "BitbucketWebHookCause", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildTriggerPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildTriggerPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `GitHubWebHook:` + strings.Replace(this.GitHubWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `GenericWebHook:` + strings.Replace(this.GenericWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `ImageChange:` + strings.Replace(this.ImageChange.String(), "ImageChangeTrigger", "ImageChangeTrigger", 1) + `,`, + `GitLabWebHook:` + strings.Replace(this.GitLabWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `BitbucketWebHook:` + strings.Replace(this.BitbucketWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildVolume) String() string { + if this == nil { + return "nil" + } + repeatedStringForMounts := "[]BuildVolumeMount{" + for _, f := range this.Mounts { + repeatedStringForMounts += strings.Replace(strings.Replace(f.String(), "BuildVolumeMount", "BuildVolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForMounts += "}" + s := strings.Join([]string{`&BuildVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "BuildVolumeSource", "BuildVolumeSource", 1), `&`, ``, 1) + `,`, + `Mounts:` + repeatedStringForMounts + `,`, + `}`, + }, "") + return s +} +func (this *BuildVolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildVolumeMount{`, + `DestinationPath:` + fmt.Sprintf("%v", this.DestinationPath) + `,`, + `}`, + }, "") + return s +} +func (this *BuildVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildVolumeSource{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "v11.SecretVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "v11.ConfigMapVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIVolumeSource", "v11.CSIVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CommonSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CommonSpec{`, + `ServiceAccount:` + fmt.Sprintf("%v", this.ServiceAccount) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "BuildSource", "BuildSource", 1), `&`, ``, 1) + `,`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "BuildStrategy", "BuildStrategy", 1), `&`, ``, 1) + `,`, + `Output:` + strings.Replace(strings.Replace(this.Output.String(), "BuildOutput", "BuildOutput", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v11.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `PostCommit:` + strings.Replace(strings.Replace(this.PostCommit.String(), "BuildPostCommitSpec", "BuildPostCommitSpec", 1), `&`, ``, 1) + `,`, + `CompletionDeadlineSeconds:` + valueToStringGenerated(this.CompletionDeadlineSeconds) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "OptionalNodeSelector", 
"OptionalNodeSelector", 1) + `,`, + `MountTrustedCA:` + valueToStringGenerated(this.MountTrustedCA) + `,`, + `}`, + }, "") + return s +} +func (this *CommonWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CommonWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapBuildSource{`, + `ConfigMap:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *CustomBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForSecrets := "[]SecretSpec{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "SecretSpec", "SecretSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForSecrets += "}" + s := strings.Join([]string{`&CustomBuildStrategy{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ExposeDockerSocket:` + fmt.Sprintf("%v", this.ExposeDockerSocket) + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `BuildAPIVersion:` + fmt.Sprintf("%v", this.BuildAPIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *DockerBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForBuildArgs := "[]EnvVar{" + for _, f := range this.BuildArgs { + repeatedStringForBuildArgs += fmt.Sprintf("%v", f) + "," + } + repeatedStringForBuildArgs += "}" + repeatedStringForVolumes := "[]BuildVolume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "BuildVolume", "BuildVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + s := strings.Join([]string{`&DockerBuildStrategy{`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `NoCache:` + fmt.Sprintf("%v", this.NoCache) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `DockerfilePath:` + fmt.Sprintf("%v", this.DockerfilePath) + `,`, + `BuildArgs:` + repeatedStringForBuildArgs + `,`, + `ImageOptimizationPolicy:` + valueToStringGenerated(this.ImageOptimizationPolicy) + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `}`, + }, "") + return s +} +func (this *DockerStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForBuildArgs := "[]EnvVar{" + for _, f 
:= range this.BuildArgs { + repeatedStringForBuildArgs += fmt.Sprintf("%v", f) + "," + } + repeatedStringForBuildArgs += "}" + s := strings.Join([]string{`&DockerStrategyOptions{`, + `BuildArgs:` + repeatedStringForBuildArgs + `,`, + `NoCache:` + valueToStringGenerated(this.NoCache) + `,`, + `}`, + }, "") + return s +} +func (this *GenericWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *GenericWebHookEvent) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&GenericWebHookEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitInfo", "GitInfo", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `DockerStrategyOptions:` + strings.Replace(this.DockerStrategyOptions.String(), "DockerStrategyOptions", "DockerStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitBuildSource{`, + `URI:` + fmt.Sprintf("%v", this.URI) + `,`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `ProxyConfig:` + strings.Replace(strings.Replace(this.ProxyConfig.String(), "ProxyConfig", "ProxyConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitHubWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitHubWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *GitInfo) String() string { + if this == nil { + return "nil" + } + repeatedStringForRefs := "[]GitRefInfo{" + for _, f := range this.Refs { + repeatedStringForRefs += strings.Replace(strings.Replace(f.String(), "GitRefInfo", "GitRefInfo", 1), `&`, ``, 1) + "," + } + repeatedStringForRefs += "}" + s := strings.Join([]string{`&GitInfo{`, + `GitBuildSource:` + strings.Replace(strings.Replace(this.GitBuildSource.String(), "GitBuildSource", "GitBuildSource", 1), `&`, ``, 1) + `,`, + `GitSourceRevision:` + strings.Replace(strings.Replace(this.GitSourceRevision.String(), "GitSourceRevision", "GitSourceRevision", 1), `&`, ``, 1) + `,`, + `Refs:` + repeatedStringForRefs + `,`, + `}`, + }, "") + return s +} +func (this *GitLabWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitLabWebHookCause{`, + `CommonWebHookCause:` + strings.Replace(strings.Replace(this.CommonWebHookCause.String(), "CommonWebHookCause", "CommonWebHookCause", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitRefInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRefInfo{`, + `GitBuildSource:` + strings.Replace(strings.Replace(this.GitBuildSource.String(), "GitBuildSource", "GitBuildSource", 1), `&`, ``, 1) + `,`, + `GitSourceRevision:` + strings.Replace(strings.Replace(this.GitSourceRevision.String(), "GitSourceRevision", "GitSourceRevision", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitSourceRevision) String() string { 
+ if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitSourceRevision{`, + `Commit:` + fmt.Sprintf("%v", this.Commit) + `,`, + `Author:` + strings.Replace(strings.Replace(this.Author.String(), "SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`, + `Committer:` + strings.Replace(strings.Replace(this.Committer.String(), "SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeCause{`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `FromRef:` + strings.Replace(fmt.Sprintf("%v", this.FromRef), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeTrigger{`, + `LastTriggeredImageID:` + fmt.Sprintf("%v", this.LastTriggeredImageID) + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeTriggerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeTriggerStatus{`, + `LastTriggeredImageID:` + fmt.Sprintf("%v", this.LastTriggeredImageID) + `,`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ImageStreamTagReference", "ImageStreamTagReference", 1), `&`, ``, 1) + `,`, + `LastTriggerTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTriggerTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageLabel) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLabel{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForPaths := "[]ImageSourcePath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "ImageSourcePath", "ImageSourcePath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" + s := strings.Join([]string{`&ImageSource{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Paths:` + repeatedStringForPaths + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `As:` + fmt.Sprintf("%v", this.As) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSourcePath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageSourcePath{`, + `SourcePath:` + fmt.Sprintf("%v", this.SourcePath) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamTagReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamTagReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *JenkinsPipelineBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range 
this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&JenkinsPipelineBuildStrategy{`, + `JenkinsfilePath:` + fmt.Sprintf("%v", this.JenkinsfilePath) + `,`, + `Jenkinsfile:` + fmt.Sprintf("%v", this.Jenkinsfile) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `}`, + }, "") + return s +} +func (this *ProxyConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProxyConfig{`, + `HTTPProxy:` + valueToStringGenerated(this.HTTPProxy) + `,`, + `HTTPSProxy:` + valueToStringGenerated(this.HTTPSProxy) + `,`, + `NoProxy:` + valueToStringGenerated(this.NoProxy) + `,`, + `}`, + }, "") + return s +} +func (this *SecretBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretBuildSource{`, + `Secret:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secret), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *SecretLocalReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretLocalReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SecretSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretSpec{`, + `SecretSource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretSource), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `}`, + }, "") + return s +} +func (this *SourceBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumes := "[]BuildVolume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "BuildVolume", "BuildVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + s := strings.Join([]string{`&SourceBuildStrategy{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Scripts:` + fmt.Sprintf("%v", this.Scripts) + `,`, + `Incremental:` + valueToStringGenerated(this.Incremental) + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `}`, + }, "") + return s +} +func (this *SourceControlUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceControlUser{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Email:` + fmt.Sprintf("%v", this.Email) + `,`, + `}`, + }, "") + return s +} +func (this *SourceRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceRevision{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitSourceRevision", "GitSourceRevision", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SourceStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceStrategyOptions{`, + 
+		`Incremental:` + valueToStringGenerated(this.Incremental) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StageInfo) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSteps := "[]StepInfo{"
+	for _, f := range this.Steps {
+		repeatedStringForSteps += strings.Replace(strings.Replace(f.String(), "StepInfo", "StepInfo", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSteps += "}"
+	s := strings.Join([]string{`&StageInfo{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`StartTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`DurationMilliseconds:` + fmt.Sprintf("%v", this.DurationMilliseconds) + `,`,
+		`Steps:` + repeatedStringForSteps + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StepInfo) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StepInfo{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`StartTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`DurationMilliseconds:` + fmt.Sprintf("%v", this.DurationMilliseconds) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WebHookTrigger) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WebHookTrigger{`,
+		`Secret:` + fmt.Sprintf("%v", this.Secret) + `,`,
+		`AllowEnv:` + fmt.Sprintf("%v", this.AllowEnv) + `,`,
+		`SecretReference:` + strings.Replace(this.SecretReference.String(), "SecretLocalReference", "SecretLocalReference", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *BinaryBuildRequestOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BinaryBuildRequestOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BinaryBuildRequestOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AsFile = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Commit = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthorName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AuthorName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthorEmail", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AuthorEmail = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CommitterName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CommitterName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CommitterEmail", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CommitterEmail = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BinaryBuildSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BinaryBuildSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BinaryBuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AsFile = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BitbucketWebHookCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BitbucketWebHookCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BitbucketWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CommonWebHookCause", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.CommonWebHookCause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Build) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Build: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Build: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildCondition) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildCondition: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildConditionType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildConfigList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildConfigList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildConfigList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, BuildConfig{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildConfigSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildConfigSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Triggers = append(m.Triggers, BuildTriggerPolicy{})
+			if err := m.Triggers[len(m.Triggers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RunPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RunPolicy = BuildRunPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.CommonSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulBuildsHistoryLimit", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SuccessfulBuildsHistoryLimit = &v
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FailedBuildsHistoryLimit", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.FailedBuildsHistoryLimit = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildConfigStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildConfigStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastVersion", wireType)
+			}
+			m.LastVersion = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.LastVersion |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeTriggers", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImageChangeTriggers = append(m.ImageChangeTriggers, ImageChangeTriggerStatus{})
+			if err := m.ImageChangeTriggers[len(m.ImageChangeTriggers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, Build{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildLog) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildLog: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildLog: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildLogOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildLogOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildLogOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Container = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Follow = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Previous = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SinceSeconds = &v
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SinceTime == nil {
+				m.SinceTime = &v1.Time{}
+			}
+			if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Timestamps = bool(v != 0)
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TailLines = &v
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.LimitBytes = &v
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.NoWait = bool(v != 0)
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Version = &v
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerifyBackend", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.InsecureSkipTLSVerifyBackend = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildOutput) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildOutput: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildOutput: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.To == nil {
+				m.To = &v11.ObjectReference{}
+			}
+			if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PushSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PushSecret == nil {
+				m.PushSecret = &v11.LocalObjectReference{}
+			}
+			if err := m.PushSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageLabels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImageLabels = append(m.ImageLabels, ImageLabel{})
+			if err := m.ImageLabels[len(m.ImageLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildPostCommitSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildPostCommitSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildPostCommitSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Args = append(m.Args, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Script", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Script = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Revision == nil {
+				m.Revision = &SourceRevision{}
+			}
+			if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TriggeredByImage", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.TriggeredByImage == nil {
+				m.TriggeredByImage = &v11.ObjectReference{}
+			}
+			if err := m.TriggeredByImage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.From == nil {
+				m.From = &v11.ObjectReference{}
+			}
+			if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Binary == nil {
+				m.Binary = &BinaryBuildSource{}
+			}
+			if err := m.Binary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastVersion", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.LastVersion = &v
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Env = append(m.Env, v11.EnvVar{})
+			if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{})
+			if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategyOptions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DockerStrategyOptions == nil {
+				m.DockerStrategyOptions = &DockerStrategyOptions{}
+			}
+			if err := m.DockerStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SourceStrategyOptions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SourceStrategyOptions == nil {
+				m.SourceStrategyOptions = &SourceStrategyOptions{}
+			}
+			if err := m.SourceStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildSourceType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Binary == nil {
+				m.Binary = &BinaryBuildSource{}
+			}
+			if err := m.Binary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Dockerfile", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.Dockerfile = &s
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Git == nil {
+				m.Git = &GitBuildSource{}
+			}
+			if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Images = append(m.Images, ImageSource{})
+			if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContextDir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContextDir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SourceSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SourceSecret == nil {
+				m.SourceSecret = &v11.LocalObjectReference{}
+			}
+			if err := m.SourceSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secrets = append(m.Secrets, SecretBuildSource{})
+			if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMaps", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ConfigMaps = append(m.ConfigMaps, ConfigMapBuildSource{})
+			if err := m.ConfigMaps[len(m.ConfigMaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.CommonSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{})
+			if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Phase = BuildPhase(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Cancelled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Cancelled = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = StatusReason(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.StartTimestamp == nil {
+				m.StartTimestamp = &v1.Time{}
+			}
+			if err := m.StartTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CompletionTimestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CompletionTimestamp == nil {
+				m.CompletionTimestamp = &v1.Time{}
+			}
+			if err := m.CompletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType)
+			}
+			m.Duration = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Duration |=
time.Duration(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutputDockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OutputDockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &v11.ObjectReference{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Output.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stages = append(m.Stages, StageInfo{}) + if err := m.Stages[len(m.Stages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSnippet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.LogSnippet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, BuildCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStatusOutput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildStatusOutput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildStatusOutput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.To == nil { + m.To = &BuildStatusOutputTo{} + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStatusOutputTo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildStatusOutputTo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: BuildStatusOutputTo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageDigest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageDigest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DockerStrategy == nil { + m.DockerStrategy = &DockerBuildStrategy{} + } + if err := m.DockerStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceStrategy == nil { + m.SourceStrategy = &SourceBuildStrategy{} + } + if err := m.SourceStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CustomStrategy == nil { + m.CustomStrategy = &CustomBuildStrategy{} + } + if err := m.CustomStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JenkinsPipelineStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.JenkinsPipelineStrategy == nil { + m.JenkinsPipelineStrategy = &JenkinsPipelineBuildStrategy{} + } + if err := m.JenkinsPipelineStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildTriggerCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildTriggerCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildTriggerCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GenericWebHook == nil { + m.GenericWebHook = &GenericWebHookCause{} + } + if err := m.GenericWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitHubWebHook == nil { + m.GitHubWebHook = &GitHubWebHookCause{} + } + if err := m.GitHubWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeBuild", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageChangeBuild == nil { + m.ImageChangeBuild = &ImageChangeCause{} + } + if err := m.ImageChangeBuild.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitLabWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitLabWebHook == nil { + m.GitLabWebHook = &GitLabWebHookCause{} + } + if err := m.GitLabWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BitbucketWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BitbucketWebHook == nil { + m.BitbucketWebHook = &BitbucketWebHookCause{} + } + if err := m.BitbucketWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildTriggerPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildTriggerPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildTriggerPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildTriggerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitHubWebHook == nil { + m.GitHubWebHook = &WebHookTrigger{} + } + if err := m.GitHubWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GenericWebHook == nil { + m.GenericWebHook = &WebHookTrigger{} + } + if err := m.GenericWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageChange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageChange == nil { + m.ImageChange = &ImageChangeTrigger{} + } + if err := m.ImageChange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitLabWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitLabWebHook == nil { + m.GitLabWebHook = &WebHookTrigger{} + } + if err := m.GitLabWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BitbucketWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BitbucketWebHook == nil { + m.BitbucketWebHook = &WebHookTrigger{} + } + if err := m.BitbucketWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildVolume: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mounts = append(m.Mounts, BuildVolumeMount{}) + if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildVolumeMount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildVolumeMount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildVolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildVolumeSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &v11.SecretVolumeSource{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &v11.ConfigMapVolumeSource{} + } + if err := 
m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CSI == nil { + m.CSI = &v11.CSIVolumeSource{} + } + if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommonSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommonSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommonSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Output.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PostCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PostCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CompletionDeadlineSeconds = &v + case 
9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = OptionalNodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MountTrustedCA", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.MountTrustedCA = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommonWebHookCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommonWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommonWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapBuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomBuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
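+	// Annotation (editorial, not generator output): each loop iteration
+	// dispatches on the decoded field number. Unknown fields fall through to
+	// the `default` branch, where skipGenerated advances past the unrecognized
+	// field so readers built from this schema stay wire-compatible with newer
+	// writers that add fields.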
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &v11.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExposeDockerSocket", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExposeDockerSocket = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForcePull = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = 
append(m.Secrets, SecretSpec{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildAPIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuildAPIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DockerBuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DockerBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DockerBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &v11.ObjectReference{} + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &v11.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType) + } + var v int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoCache = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForcePull = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerfilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerfilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildArgs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuildArgs = append(m.BuildArgs, v11.EnvVar{}) + if err := m.BuildArgs[len(m.BuildArgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageOptimizationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ImageOptimizationPolicy(dAtA[iNdEx:postIndex]) + m.ImageOptimizationPolicy = &s + iNdEx = 
postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, BuildVolume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DockerStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DockerStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DockerStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildArgs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuildArgs = append(m.BuildArgs, v11.EnvVar{}) + if err := m.BuildArgs[len(m.BuildArgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.NoCache = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericWebHookCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericWebHookEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericWebHookEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericWebHookEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Type = BuildSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitInfo{} + } + if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategyOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DockerStrategyOptions == nil { + m.DockerStrategyOptions = &DockerStrategyOptions{} + } + if err := m.DockerStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitBuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProxyConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProxyConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitHubWebHookCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitHubWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitHubWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := 
m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitBuildSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GitBuildSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitSourceRevision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GitSourceRevision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Refs = append(m.Refs, GitRefInfo{}) + if err := m.Refs[len(m.Refs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitLabWebHookCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitLabWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitLabWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonWebHookCause", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonWebHookCause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitRefInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitRefInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitRefInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitBuildSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GitBuildSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitSourceRevision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GitSourceRevision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitSourceRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitSourceRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitSourceRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Author", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Author.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Committer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Committer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageChangeCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageChangeCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageChangeCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FromRef == nil { + m.FromRef = &v11.ObjectReference{} + } + if 
err := m.FromRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageChangeTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageChangeTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageChangeTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTriggeredImageID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &v11.ObjectReference{} + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageChangeTriggerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageChangeTriggerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageChangeTriggerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTriggeredImageID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTriggerTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTriggerTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLabel) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLabel: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLabel: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, ImageSourcePath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &v11.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field As", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.As = append(m.As, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSourcePath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageSourcePath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSourcePath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field DestinationDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamTagReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamTagReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamTagReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JenkinsPipelineBuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JenkinsfilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JenkinsfilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Jenkinsfile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Jenkinsfile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalNodeSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { 
+ return fmt.Errorf("proto: OptionalNodeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalNodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if *m == nil { + *m = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + (*m)[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProxyConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProxyConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HTTPProxy = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPSProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HTTPSProxy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NoProxy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretBuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretBuildSource: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretLocalReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretLocalReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretLocalReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*SecretSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SecretSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceBuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &v11.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scripts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scripts = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incremental", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Incremental = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForcePull = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, BuildVolume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceControlUser) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceControlUser: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceControlUser: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Email", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Email = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitSourceRevision{} + } + if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incremental", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Incremental = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StageInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StageInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StageInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = StageName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationMilliseconds", wireType) + } + m.DurationMilliseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationMilliseconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Steps = append(m.Steps, StepInfo{}) + if err := m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StepInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l 
{ + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StepInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StepInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = StepName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationMilliseconds", wireType) + } + m.DurationMilliseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationMilliseconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebHookTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebHookTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebHookTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEnv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowEnv = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretReference == nil { + m.SecretReference = &SecretLocalReference{} + } + if err := m.SecretReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: 
integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/generated.proto b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/generated.proto new file mode 100644 index 000000000..7a18a7f02 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/generated.proto @@ -0,0 +1,1254 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package git.colasdn.top.openshift.api.build.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/build/v1"; + +// BinaryBuildRequestOptions are the options required to fully speficy a binary build request +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BinaryBuildRequestOptions { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // asFile determines if the binary should be created as a file within the source rather than extracted as an archive + optional string asFile = 2; + + // revision.commit is the value identifying a specific commit + optional string revisionCommit = 3; + + // revision.message is the description of a specific commit + optional string revisionMessage = 4; + + // revision.authorName of the source control user + optional string revisionAuthorName = 5; + + // revision.authorEmail of the source control user + optional string revisionAuthorEmail = 6; + + // revision.committerName of the source control user + optional string revisionCommitterName = 7; + + // revision.committerEmail of the source control user + optional string revisionCommitterEmail = 8; +} + +// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, +// where the file will be extracted and used as the build source. +message BinaryBuildSource { + // asFile indicates that the provided binary input should be considered a single file + // within the build input. For example, specifying "webapp.war" would place the provided + // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build + // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. + // The custom strategy receives this binary as standard input. This filename may not + // contain slashes or be '..' or '.'. + optional string asFile = 1; +} + +// BitbucketWebHookCause has information about a Bitbucket webhook that triggered a +// build. +message BitbucketWebHookCause { + optional CommonWebHookCause commonSpec = 1; +} + +// Build encapsulates the inputs needed to produce a new deployable image, as well as +// the status of the execution and a reference to the Pod which executed the build. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Build { + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is all the inputs used to execute the build. + optional BuildSpec spec = 2; + + // status is the current status of the build. + // +optional + optional BuildStatus status = 3; +} + +// BuildCondition describes the state of a build at a certain point. +message BuildCondition { + // type of build condition. + optional string type = 1; + + // status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // The last time the condition transitioned from one status to another. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the "output" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be created. +// +// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildConfig { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec holds all the input necessary to produce a new build, and the conditions when + // to trigger them. + optional BuildConfigSpec spec = 2; + + // status holds any relevant information about a build config + // +optional + optional BuildConfigStatus status = 3; +} + +// BuildConfigList is a collection of BuildConfigs. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildConfigList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of build configs + repeated BuildConfig items = 2; +} + +// BuildConfigSpec describes when and how builds are created +message BuildConfigSpec { + // triggers determine how new Builds can be launched from a BuildConfig. If + // no triggers are defined, a new build can only occur as a result of an + // explicit client build creation.
+ // +optional + repeated BuildTriggerPolicy triggers = 1; + + // runPolicy describes how the new build created from this build + // configuration will be scheduled for execution. + // This is optional; if not specified, we default to "Serial". + optional string runPolicy = 2; + + // CommonSpec is the desired build specification + optional CommonSpec commonSpec = 3; + + // successfulBuildsHistoryLimit is the number of old successful builds to retain. + // When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all successful builds are retained. + optional int32 successfulBuildsHistoryLimit = 4; + + // failedBuildsHistoryLimit is the number of old failed builds to retain. + // When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all failed builds are retained. + optional int32 failedBuildsHistoryLimit = 5; +} + +// BuildConfigStatus contains current state of the build config object. +message BuildConfigStatus { + // lastVersion is used to inform about the number of the last triggered build. + // +optional + optional int64 lastVersion = 1; + + // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, + // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry + // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger. + // +optional + repeated ImageChangeTriggerStatus imageChangeTriggers = 2; +} + +// BuildList is a collection of Builds. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of builds + repeated Build items = 2; +} + +// BuildLog is the (unused) resource associated with the build log redirector +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildLog { +} + +// BuildLogOptions is the REST options for a build log +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildLogOptions { + // container for which to stream logs. Defaults to the only container if there is one container in the pod. + optional string container = 1; + + // follow if true indicates that the build log should be streamed until + // the build terminates. + optional bool follow = 2; + + // previous returns previous build logs. Defaults to false. + optional bool previous = 3; + + // sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified.
+ optional int64 sinceSeconds = 4; + + // sinceTime is an RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + + // timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + optional bool timestamps = 6; + + // tailLines, If set, is the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime. + optional int64 tailLines = 7; + + // limitBytes, If set, is the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + optional int64 limitBytes = 8; + + // nowait if true causes the call to return immediately even if the build + // is not available yet. Otherwise the server will wait until the build has started. + // TODO: Fix the tag to 'noWait' in v2 + optional bool nowait = 9; + + // version of the build for which to view logs. + optional int64 version = 10; + + // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the + // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver + // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real + // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the + // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept + // the actual log data coming from the real kubelet). + // +optional + optional bool insecureSkipTLSVerifyBackend = 11; +} + +// BuildOutput is input to a build strategy and describes the container image that the strategy +// should produce. +message BuildOutput { + // to defines an optional location to push the output of this build to. + // Kind must be one of 'ImageStreamTag' or 'DockerImage'. + // This value will be used to look up a container image repository to push to. + // In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of + // the build unless Namespace is specified. + optional .k8s.io.api.core.v1.ObjectReference to = 1; + + // pushSecret is the name of a Secret that would be used for setting + // up the authentication for executing the Docker push to an authentication + // enabled Docker Registry (or Docker Hub). + optional .k8s.io.api.core.v1.LocalObjectReference pushSecret = 2; + + // imageLabels define a list of labels that are applied to the resulting image. If there + // are multiple labels with the same name then the last one in the list is used. + repeated ImageLabel imageLabels = 3; +} + +// A BuildPostCommitSpec holds a build post commit hook specification. The hook +// executes a command in a temporary container running the build output image, +// immediately after the last layer of the image is committed and before the +// image is pushed to a registry. The command is executed with the current +// working directory ($PWD) set to the image's WORKDIR. +// +// The build will be marked as failed if the hook execution fails.
It will fail +// if the script or command returns a non-zero exit code, or if there is any +// other error related to starting the temporary container. +// +// There are five different ways to configure the hook. As an example, all forms +// below are equivalent and will execute `rake test --verbose`. +// +// 1. Shell script: +// +// "postCommit": { +// "script": "rake test --verbose" +// } +// +// The above is a convenient form which is equivalent to: +// +// "postCommit": { +// "command": ["/bin/sh", "-ic"], +// "args": ["rake test --verbose"] +// } +// +// 2. A command as the image entrypoint: +// +// "postCommit": { +// "command": ["rake", "test", "--verbose"] +// } +// +// Command overrides the image entrypoint in the exec form, as documented in +// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint. +// +// 3. Pass arguments to the default entrypoint: +// +// "postCommit": { +// "args": ["rake", "test", "--verbose"] +// } +// +// This form is only useful if the image entrypoint can handle arguments. +// +// 4. Shell script with arguments: +// +// "postCommit": { +// "script": "rake test $1", +// "args": ["--verbose"] +// } +// +// This form is useful if you need to pass arguments that would otherwise be +// hard to quote properly in the shell script. In the script, $0 will be +// "/bin/sh" and $1, $2, etc., are the positional arguments from Args. +// +// 5. Command with arguments: +// +// "postCommit": { +// "command": ["rake", "test"], +// "args": ["--verbose"] +// } +// +// This form is equivalent to appending the arguments to the Command slice. +// +// It is invalid to provide both Script and Command simultaneously. If none of +// the fields are specified, the hook is not executed. +message BuildPostCommitSpec { + // command is the command to run. It may not be specified with Script. + // This might be needed if the image doesn't have `/bin/sh`, or if you + // do not want to use a shell. In all other cases, using Script might be + // more convenient. + repeated string command = 1; + + // args is a list of arguments that are provided to either Command, + // Script or the container image's default entrypoint. The arguments are + // placed immediately after the command to be run. + repeated string args = 2; + + // script is a shell script to be run with `/bin/sh -ic`. It may not be + // specified with Command. Use Script when a shell script is appropriate + // to execute the post build hook, for example for running unit tests + // with `rake test`. If you need control over the image entrypoint, or + // if the image does not have `/bin/sh`, use Command and/or Args. + // The `-i` flag is needed to support CentOS and RHEL images that use + // Software Collections (SCL), in order to have the appropriate + // collections enabled in the shell. E.g., in the Ruby image, this is + // necessary to make `ruby`, `bundle` and other binaries available in + // the PATH. + optional string script = 3; +} + +// BuildRequest is the resource used to pass parameters to build generator +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildRequest { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // revision is the information from the source for a specific repo snapshot.
+ optional SourceRevision revision = 2; + + // triggeredByImage is the Image that triggered this build. + optional .k8s.io.api.core.v1.ObjectReference triggeredByImage = 3; + + // from is the reference to the ImageStreamTag that triggered the build. + optional .k8s.io.api.core.v1.ObjectReference from = 4; + + // binary indicates a request to build from a binary provided to the builder + optional BinaryBuildSource binary = 5; + + // lastVersion (optional) is the LastVersion of the BuildConfig that was used + // to generate the build. If the BuildConfig in the generator doesn't match, a build will + // not be generated. + optional int64 lastVersion = 6; + + // env contains additional environment variables you want to pass into a builder container. + repeated .k8s.io.api.core.v1.EnvVar env = 7; + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + repeated BuildTriggerCause triggeredBy = 8; + + // dockerStrategyOptions contains additional docker-strategy specific options for the build + optional DockerStrategyOptions dockerStrategyOptions = 9; + + // sourceStrategyOptions contains additional source-strategy specific options for the build + optional SourceStrategyOptions sourceStrategyOptions = 10; +} + +// BuildSource is the SCM used for the build. +message BuildSource { + // type of build input to accept + // +k8s:conversion-gen=false + // +optional + optional string type = 1; + + // binary builds accept a binary as their input. The binary is generally assumed to be a tar, + // gzipped tar, or zip file depending on the strategy. For container image builds, this is the build + // context and an optional Dockerfile may be specified to override any Dockerfile in the + // build context. For Source builds, this is assumed to be an archive as described above. For + // Source and container image builds, if binary.asFile is set the build will receive a directory with + // a single file. contextDir may be used when an archive is provided. Custom builds will + // receive this binary as input on STDIN. + optional BinaryBuildSource binary = 2; + + // dockerfile is the raw contents of a Dockerfile which should be built. When this option is + // specified, the FROM may be modified based on your strategy base image and additional ENV + // stanzas from your strategy environment will be added after the FROM, but before the rest + // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like + // git - in those cases the Git repo will have any innate Dockerfile replaced in the context + // dir. + optional string dockerfile = 3; + + // git contains optional information about git build source + optional GitBuildSource git = 4; + + // images describes a set of images to be used to provide source for the build + repeated ImageSource images = 5; + + // contextDir specifies the sub-directory where the source code for the application exists. + // This allows having buildable sources in a directory other than the root of + // the repository. + optional string contextDir = 6; + + // sourceSecret is the name of a Secret that would be used for setting + // up the authentication for cloning a private repository. + // The secret contains valid credentials for the remote repository, where the + // data's key represents the authentication method to be used and the value is + // the base64 encoded credentials. Supported auth methods are: ssh-privatekey.
+ optional .k8s.io.api.core.v1.LocalObjectReference sourceSecret = 7; + + // secrets represents a list of secrets and their destinations that will + // be used only for the build. + repeated SecretBuildSource secrets = 8; + + // configMaps represents a list of configMaps and their destinations that will + // be used for the build. + repeated ConfigMapBuildSource configMaps = 9; +} + +// BuildSpec has the information to represent a build and also additional +// information about a build +message BuildSpec { + // CommonSpec is the information that represents a build + optional CommonSpec commonSpec = 1; + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + repeated BuildTriggerCause triggeredBy = 2; +} + +// BuildStatus contains the status of a build +message BuildStatus { + // phase is the point in the build lifecycle. Possible values are + // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". + // +optional + optional string phase = 1; + + // cancelled describes if a cancel event was triggered for the build. + // +optional + optional bool cancelled = 2; + + // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + // +optional + optional string reason = 3; + + // message is a human-readable message indicating details about why the build has this status. + // +optional + optional string message = 4; + + // startTimestamp is a timestamp representing the server time when this Build started + // running in a Pod. + // It is represented in RFC3339 form and is in UTC. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTimestamp = 5; + + // completionTimestamp is a timestamp representing the server time when this Build was + // finished, whether that build failed or succeeded. It reflects the time at which + // the Pod running the Build terminated. + // It is represented in RFC3339 form and is in UTC. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTimestamp = 6; + + // duration contains a time.Duration object describing build time. + // +optional + optional int64 duration = 7; + + // outputDockerImageReference contains a reference to the container image that + // will be built by this build. Its value is computed from + // Build.Spec.Output.To, and should include the registry address, so that + // it can be used to push and pull the image. + // +optional + optional string outputDockerImageReference = 8; + + // config is an ObjectReference to the BuildConfig this Build is based on. + // +optional + optional .k8s.io.api.core.v1.ObjectReference config = 9; + + // output describes the container image the build has produced. + // +optional + optional BuildStatusOutput output = 10; + + // stages contains details about each stage that occurs during the build + // including start time, duration (in milliseconds), and the steps that + // occurred within each stage. + // +optional + repeated StageInfo stages = 11; + + // logSnippet is the last few lines of the build log. This value is only set for builds that failed. + // +optional + optional string logSnippet = 12; + + // conditions represents the latest available observations of a build's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + repeated BuildCondition conditions = 13; +} + +// BuildStatusOutput contains the status of the built image.
+message BuildStatusOutput { + // to describes the status of the built image being pushed to a registry. + optional BuildStatusOutputTo to = 1; +} + +// BuildStatusOutputTo describes the status of the built image with regards to the +// image registry to which it was supposed to be pushed. +message BuildStatusOutputTo { + // imageDigest is the digest of the built container image. The digest uniquely + // identifies the image in the registry to which it was pushed. + // + // Please note that this field may not always be set even if the push + // completes successfully - e.g. when the registry returns no digest or + // returns it in a format that the builder doesn't understand. + optional string imageDigest = 1; +} + +// BuildStrategy contains the details of how to perform a build. +message BuildStrategy { + // type is the kind of build strategy. + // +k8s:conversion-gen=false + // +optional + optional string type = 1; + + // dockerStrategy holds the parameters to the container image build strategy. + optional DockerBuildStrategy dockerStrategy = 2; + + // sourceStrategy holds the parameters to the Source build strategy. + optional SourceBuildStrategy sourceStrategy = 3; + + // customStrategy holds the parameters to the Custom build strategy + optional CustomBuildStrategy customStrategy = 4; + + // jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. + // Deprecated: use OpenShift Pipelines + optional JenkinsPipelineBuildStrategy jenkinsPipelineStrategy = 5; +} + +// BuildTriggerCause holds information about a triggered build. It is used for +// displaying build trigger data for each build and build configuration in oc +// describe. It is also used to describe which triggers led to the most recent +// update in the build configuration. +message BuildTriggerCause { + // message is used to store a human readable message for why the build was + // triggered. E.g.: "Manually triggered by user", "Configuration change", etc. + optional string message = 1; + + // genericWebHook holds data about a build's generic webhook trigger. + optional GenericWebHookCause genericWebHook = 2; + + // githubWebHook represents data for a GitHub webhook that fired a + // specific build. + optional GitHubWebHookCause githubWebHook = 3; + + // imageChangeBuild stores information about an imagechange event + // that triggered a new build. + optional ImageChangeCause imageChangeBuild = 4; + + // gitlabWebHook represents data for a GitLab webhook that fired a specific + // build. + optional GitLabWebHookCause gitlabWebHook = 5; + + // bitbucketWebHook represents data for a Bitbucket webhook that fired a + // specific build. + optional BitbucketWebHookCause bitbucketWebHook = 6; +} + +// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build. +message BuildTriggerPolicy { + // type is the type of build trigger.
Valid values: + // + // - GitHub + // GitHubWebHookBuildTriggerType represents a trigger that launches builds on + // GitHub webhook invocations + // + // - Generic + // GenericWebHookBuildTriggerType represents a trigger that launches builds on + // generic webhook invocations + // + // - GitLab + // GitLabWebHookBuildTriggerType represents a trigger that launches builds on + // GitLab webhook invocations + // + // - Bitbucket + // BitbucketWebHookBuildTriggerType represents a trigger that launches builds on + // Bitbucket webhook invocations + // + // - ImageChange + // ImageChangeBuildTriggerType represents a trigger that launches builds on + // availability of a new version of an image + // + // - ConfigChange + // ConfigChangeBuildTriggerType will trigger a build on an initial build config creation. + // WARNING: In the future the behavior will change to trigger a build on any config change + optional string type = 1; + + // github contains the parameters for a GitHub webhook type of trigger + optional WebHookTrigger github = 2; + + // generic contains the parameters for a Generic webhook type of trigger + optional WebHookTrigger generic = 3; + + // imageChange contains parameters for an ImageChange type of trigger + optional ImageChangeTrigger imageChange = 4; + + // gitlab contains the parameters for a GitLab webhook type of trigger + optional WebHookTrigger gitlab = 5; + + // bitbucket contains the parameters for a Bitbucket webhook type of + // trigger + optional WebHookTrigger bitbucket = 6; +} + +// BuildVolume describes a volume that is made available to build pods, +// such that it can be mounted into buildah's runtime environment. +// Only a subset of Kubernetes Volume sources are supported. +message BuildVolume { + // name is a unique identifier for this BuildVolume. + // It must conform to the Kubernetes DNS label standard and be unique within the pod. + // Names that collide with those added by the build controller will result in a + // failed build with an error message detailing which name caused the error. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +required + optional string name = 1; + + // source represents the location and type of the mounted volume. + // +required + optional BuildVolumeSource source = 2; + + // mounts represents the location of the volume in the image build container + // +required + // +listType=map + // +listMapKey=destinationPath + // +patchMergeKey=destinationPath + // +patchStrategy=merge + repeated BuildVolumeMount mounts = 3; +} + +// BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment. +message BuildVolumeMount { + // destinationPath is the path within the buildah runtime environment at which the volume should be mounted. + // The transient mount within the build image and the backing volume will both be mounted read only. + // Must be an absolute path, must not contain '..' or ':', and must not collide with a destination path generated + // by the builder process. + // Paths that collide with those added by the build controller will result in a + // failed build with an error message detailing which path caused the error. + optional string destinationPath = 1; +} + +// BuildVolumeSource represents the source of a volume to mount. +// Only one of its supported types may be specified at any given time. +message BuildVolumeSource { + // type is the BuildVolumeSourceType for the volume source.
+ // Type must match the populated volume source.
+ // Valid types are: Secret, ConfigMap
+ optional string type = 1;
+
+ // secret represents a Secret that should populate this volume.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ // +optional
+ optional .k8s.io.api.core.v1.SecretVolumeSource secret = 2;
+
+ // configMap represents a ConfigMap that should populate this volume
+ // +optional
+ optional .k8s.io.api.core.v1.ConfigMapVolumeSource configMap = 3;
+
+ // csi represents ephemeral storage provided by external CSI drivers which support this capability
+ // +optional
+ optional .k8s.io.api.core.v1.CSIVolumeSource csi = 4;
+}
+
+// CommonSpec encapsulates all the inputs necessary to represent a build.
+message CommonSpec {
+ // serviceAccount is the name of the ServiceAccount to use to run the pod
+ // created by this build.
+ // The pod will be allowed to use secrets referenced by the ServiceAccount
+ optional string serviceAccount = 1;
+
+ // source describes the SCM in use.
+ optional BuildSource source = 2;
+
+ // revision is the information from the source for a specific repo snapshot.
+ // This is optional.
+ optional SourceRevision revision = 3;
+
+ // strategy defines how to perform a build.
+ optional BuildStrategy strategy = 4;
+
+ // output describes the container image the Strategy should produce.
+ optional BuildOutput output = 5;
+
+ // resources computes resource requirements to execute the build.
+ optional .k8s.io.api.core.v1.ResourceRequirements resources = 6;
+
+ // postCommit is a build hook executed after the build output image is
+ // committed, before it is pushed to a registry.
+ optional BuildPostCommitSpec postCommit = 7;
+
+ // completionDeadlineSeconds is an optional duration in seconds, counted from
+ // the time when a build pod gets scheduled in the system, that the build may
+ // be active on a node before the system actively tries to terminate the
+ // build; value must be a positive integer
+ optional int64 completionDeadlineSeconds = 8;
+
+ // nodeSelector is a selector which must be true for the build pod to fit on a node
+ // If nil, it can be overridden by default build nodeselector values for the cluster.
+ // If set to an empty map or a map with any values, default build nodeselector values
+ // are ignored.
+ // +optional
+ optional OptionalNodeSelector nodeSelector = 9;
+
+ // mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in
+ // the cluster's proxy configuration, into the build. This lets processes within a build trust
+ // components signed by custom PKI certificate authorities, such as private artifact
+ // repositories and HTTPS proxies.
+ //
+ // When this field is set to true, the contents of `/etc/pki/ca-trust` within the build are
+ // managed by the build container, and any changes to this directory or its subdirectories (for
+ // example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image.
+ optional bool mountTrustedCA = 10;
+}
+
+// CommonWebHookCause factors out the identical format of these webhook
+// causes into a struct so we can share it in the specific causes; it is too late for
+// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.
+message CommonWebHookCause {
+ // revision is the git source revision information of the trigger.
+ optional SourceRevision revision = 1;
+
+ // secret is the obfuscated webhook secret that triggered a build. 
+ optional string secret = 2;
+}
+
+// ConfigMapBuildSource describes a configmap and its destination directory that will be
+// used only at the build time. The content of the configmap referenced here will
+// be copied into the destination directory instead of mounting.
+message ConfigMapBuildSource {
+ // configMap is a reference to an existing configmap that you want to use in your
+ // build.
+ optional .k8s.io.api.core.v1.LocalObjectReference configMap = 1;
+
+ // destinationDir is the directory where the files from the configmap should be
+ // available for the build time.
+ // For the Source build strategy, these will be injected into a container
+ // where the assemble script runs.
+ // For the container image build strategy, these will be copied into the build
+ // directory, where the Dockerfile is located, so users can ADD or COPY them
+ // during container image build.
+ optional string destinationDir = 2;
+}
+
+// CustomBuildStrategy defines input parameters specific to a Custom build.
+message CustomBuildStrategy {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ optional .k8s.io.api.core.v1.ObjectReference from = 1;
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ optional .k8s.io.api.core.v1.LocalObjectReference pullSecret = 2;
+
+ // env contains additional environment variables you want to pass into a builder container.
+ repeated .k8s.io.api.core.v1.EnvVar env = 3;
+
+ // exposeDockerSocket will allow running Docker commands (and build container images) from
+ // inside the container.
+ // TODO: Allow admins to enforce 'false' for this option
+ optional bool exposeDockerSocket = 4;
+
+ // forcePull describes if the controller should configure the build pod to always pull the images
+ // for the builder or only pull if it is not present locally
+ optional bool forcePull = 5;
+
+ // secrets is a list of additional secrets that will be included in the build pod
+ repeated SecretSpec secrets = 6;
+
+ // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder
+ optional string buildAPIVersion = 7;
+}
+
+// DockerBuildStrategy defines input parameters specific to container image build.
+message DockerBuildStrategy {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides
+ // the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds,
+ // this will replace the image in the last FROM directive of the file.
+ optional .k8s.io.api.core.v1.ObjectReference from = 1;
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ optional .k8s.io.api.core.v1.LocalObjectReference pullSecret = 2;
+
+ // noCache if set to true indicates that the container image build must be executed with the
+ // --no-cache=true flag
+ optional bool noCache = 3;
+
+ // env contains additional environment variables you want to pass into a builder container.
+ repeated .k8s.io.api.core.v1.EnvVar env = 4;
+
+ // forcePull describes if the builder should pull the images from registry prior to building. 
+ optional bool forcePull = 5;
+
+ // dockerfilePath is the path of the Dockerfile that will be used to build the container image,
+ // relative to the root of the context (contextDir).
+ // Defaults to `Dockerfile` if unset.
+ optional string dockerfilePath = 6;
+
+ // buildArgs contains build arguments that will be resolved in the Dockerfile. See
+ // https://docs.docker.com/engine/reference/builder/#/arg for more details.
+ // NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field
+ // are ignored.
+ repeated .k8s.io.api.core.v1.EnvVar buildArgs = 7;
+
+ // imageOptimizationPolicy describes what optimizations the system can use when building images
+ // to reduce the final size or time spent building the image. The default policy is 'None' which
+ // means the final build image will be equivalent to an image created by the container image build API.
+ // The experimental policy 'SkipLayers' will avoid committing new layers in between each
+ // image step, and will fail if the Dockerfile cannot provide compatibility with the 'None'
+ // policy. An additional experimental policy 'SkipLayersAndWarn' is the same as
+ // 'SkipLayers' but simply warns if compatibility cannot be preserved.
+ optional string imageOptimizationPolicy = 8;
+
+ // volumes is a list of input volumes that can be mounted into the build's runtime environment.
+ // Only a subset of Kubernetes Volume sources are supported by builds.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ // +listType=map
+ // +listMapKey=name
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated BuildVolume volumes = 9;
+}
+
+// DockerStrategyOptions contains extra strategy options for container image builds
+message DockerStrategyOptions {
+ // Args contains any build arguments that are to be passed to Docker. See
+ // https://docs.docker.com/engine/reference/builder/#/arg for more details
+ repeated .k8s.io.api.core.v1.EnvVar buildArgs = 1;
+
+ // noCache overrides the docker-strategy noCache option in the build config
+ optional bool noCache = 2;
+}
+
+// GenericWebHookCause holds information about a generic WebHook that
+// triggered a build.
+message GenericWebHookCause {
+ // revision is an optional field that stores the git source revision
+ // information of the generic webhook trigger when it is available.
+ optional SourceRevision revision = 1;
+
+ // secret is the obfuscated webhook secret that triggered a build.
+ optional string secret = 2;
+}
+
+// GenericWebHookEvent is the payload expected for a generic webhook post
+message GenericWebHookEvent {
+ // type is the type of source repository
+ // +k8s:conversion-gen=false
+ optional string type = 1;
+
+ // git is the git information if the Type is BuildSourceGit
+ optional GitInfo git = 2;
+
+ // env contains additional environment variables you want to pass into a builder container.
+ // ValueFrom is not supported.
+ repeated .k8s.io.api.core.v1.EnvVar env = 3;
+
+ // dockerStrategyOptions contains additional docker-strategy specific options for the build
+ optional DockerStrategyOptions dockerStrategyOptions = 4;
+}
+
+// GitBuildSource defines the parameters of a Git SCM
+message GitBuildSource {
+ // uri points to the source that will be built. The structure of the source
+ // will depend on the type of build to run
+ optional string uri = 1;
+
+ // ref is the branch/tag/ref to build.
+ optional string ref = 2;
+
+ // proxyConfig defines the proxies to use for the git clone operation. 
Values + // not set here are inherited from cluster-wide build git proxy settings. + optional ProxyConfig proxyConfig = 3; +} + +// GitHubWebHookCause has information about a GitHub webhook that triggered a +// build. +message GitHubWebHookCause { + // revision is the git revision information of the trigger. + optional SourceRevision revision = 1; + + // secret is the obfuscated webhook secret that triggered a build. + optional string secret = 2; +} + +// GitInfo is the aggregated git information for a generic webhook post +message GitInfo { + optional GitBuildSource gitBuildSource = 1; + + optional GitSourceRevision gitSourceRevision = 2; + + // refs is a list of GitRefs for the provided repo - generally sent + // when used from a post-receive hook. This field is optional and is + // used when sending multiple refs + repeated GitRefInfo refs = 3; +} + +// GitLabWebHookCause has information about a GitLab webhook that triggered a +// build. +message GitLabWebHookCause { + optional CommonWebHookCause commonSpec = 1; +} + +// GitRefInfo is a single ref +message GitRefInfo { + optional GitBuildSource gitBuildSource = 1; + + optional GitSourceRevision gitSourceRevision = 2; +} + +// GitSourceRevision is the commit information from a git source for a build +message GitSourceRevision { + // commit is the commit hash identifying a specific commit + optional string commit = 1; + + // author is the author of a specific commit + optional SourceControlUser author = 2; + + // committer is the committer of a specific commit + optional SourceControlUser committer = 3; + + // message is the description of a specific commit + optional string message = 4; +} + +// ImageChangeCause contains information about the image that triggered a +// build +message ImageChangeCause { + // imageID is the ID of the image that triggered a new build. + optional string imageID = 1; + + // fromRef contains detailed information about an image that triggered a + // build. + optional .k8s.io.api.core.v1.ObjectReference fromRef = 2; +} + +// ImageChangeTrigger allows builds to be triggered when an ImageStream changes +message ImageChangeTrigger { + // lastTriggeredImageID is used internally by the ImageChangeController to save last + // used image ID for build + // This field is deprecated and will be removed in a future release. + // Deprecated + optional string lastTriggeredImageID = 1; + + // from is a reference to an ImageStreamTag that will trigger a build when updated + // It is optional. If no From is specified, the From image from the build strategy + // will be used. Only one ImageChangeTrigger with an empty From reference is allowed in + // a build configuration. + optional .k8s.io.api.core.v1.ObjectReference from = 2; + + // paused is true if this trigger is temporarily disabled. Optional. + optional bool paused = 3; +} + +// ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy +// specified in the BuildConfigSpec.Triggers struct. +message ImageChangeTriggerStatus { + // lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started. + // The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started. + optional string lastTriggeredImageID = 1; + + // from is the ImageStreamTag that is the source of the trigger. 
+ optional ImageStreamTagReference from = 2;
+
+ // lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start.
+ // This field is only updated when this trigger specifically started a Build.
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTriggerTime = 3;
+}
+
+// ImageLabel represents a label applied to the resulting image.
+message ImageLabel {
+ // name defines the name of the label. It must have non-zero length.
+ optional string name = 1;
+
+ // value defines the literal value of the label.
+ optional string value = 2;
+}
+
+// ImageSource is used to describe build source that will be extracted from an image or used during a
+// multi-stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used.
+// A pull secret can be specified to pull the image from an external registry or override the default
+// service account secret if pulling from the internal registry. Image sources can either be used to
+// extract content from an image and place it into the build context along with the repository source,
+// or used directly during a multi-stage container image build to allow content to be copied without overwriting
+// the contents of the repository source (see the 'paths' and 'as' fields).
+message ImageSource {
+ // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to
+ // copy source from.
+ optional .k8s.io.api.core.v1.ObjectReference from = 1;
+
+ // A list of image names that this source will be used in place of during a multi-stage container image
+ // build. For instance, a Dockerfile that uses "COPY --from=nginx:latest" will first check for an image
+ // source that has "nginx:latest" in this field before attempting to pull directly. If the Dockerfile
+ // does not reference an image source it is ignored. This field and paths may both be set, in which case
+ // the contents will be used twice.
+ // +optional
+ repeated string as = 4;
+
+ // paths is a list of source and destination paths to copy from the image. This content will be copied
+ // into the build context prior to starting the build. If no paths are set, the build context will
+ // not be altered.
+ // +optional
+ repeated ImageSourcePath paths = 2;
+
+ // pullSecret is a reference to a secret to be used to pull the image from a registry
+ // If the image is pulled from the OpenShift registry, this field does not need to be set.
+ optional .k8s.io.api.core.v1.LocalObjectReference pullSecret = 3;
+}
+
+// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.
+message ImageSourcePath {
+ // sourcePath is the absolute path of the file or directory inside the image to
+ // copy to the build directory. If the source path ends in /. then the content of
+ // the directory will be copied, but the directory itself will not be created at the
+ // destination.
+ optional string sourcePath = 1;
+
+ // destinationDir is the relative directory within the build directory
+ // where files copied from the image are placed.
+ optional string destinationDir = 2;
+}
+
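[Editorial aside, not part of the vendored diff: the interplay of the `as` and `paths` fields above is easier to see in code. A minimal sketch using the Go types that appear later in this diff; the image name and paths are invented for illustration, and the import is the canonical upstream module path.]

package main

import (
	buildv1 "github.com/openshift/api/build/v1"
	corev1 "k8s.io/api/core/v1"
)

// exampleImageSource stands in for "nginx:latest" in multi-stage
// COPY --from directives (via As) and also copies static content into the
// build context (via Paths); per the comments above, both may be set at once.
var exampleImageSource = buildv1.ImageSource{
	From: corev1.ObjectReference{Kind: "ImageStreamTag", Name: "nginx:latest"},
	As:   []string{"nginx:latest"},
	Paths: []buildv1.ImageSourcePath{{
		SourcePath:     "/usr/share/nginx/html/.",
		DestinationDir: "static",
	}},
}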
+// ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name.
+message ImageStreamTagReference {
+ // namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located
+ optional string namespace = 1;
+
+ // name is the name of the ImageStreamTag for an ImageChangeTrigger
+ optional string name = 2;
+}
+
+// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
+// Deprecated: use OpenShift Pipelines
+message JenkinsPipelineBuildStrategy {
+ // jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
+ // relative to the root of the context (contextDir). If neither JenkinsfilePath nor Jenkinsfile is
+ // specified, this defaults to Jenkinsfile in the root of the specified contextDir.
+ optional string jenkinsfilePath = 1;
+
+ // jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
+ optional string jenkinsfile = 2;
+
+ // env contains additional environment variables you want to pass into a build pipeline.
+ repeated .k8s.io.api.core.v1.EnvVar env = 3;
+}
+
+// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message OptionalNodeSelector {
+ // items, if empty, will result in an empty map
+ map<string, string> items = 1;
+}
+
+// ProxyConfig defines what proxies to use for an operation
+message ProxyConfig {
+ // httpProxy is a proxy used to reach the git repository over http
+ optional string httpProxy = 3;
+
+ // httpsProxy is a proxy used to reach the git repository over https
+ optional string httpsProxy = 4;
+
+ // noProxy is the list of domains for which the proxy should not be used
+ optional string noProxy = 5;
+}
+
+// SecretBuildSource describes a secret and its destination directory that will be
+// used only at the build time. The content of the secret referenced here will
+// be copied into the destination directory instead of mounting.
+message SecretBuildSource {
+ // secret is a reference to an existing secret that you want to use in your
+ // build.
+ optional .k8s.io.api.core.v1.LocalObjectReference secret = 1;
+
+ // destinationDir is the directory where the files from the secret should be
+ // available for the build time.
+ // For the Source build strategy, these will be injected into a container
+ // where the assemble script runs. Later, when the script finishes, all files
+ // injected will be truncated to zero length.
+ // For the container image build strategy, these will be copied into the build
+ // directory, where the Dockerfile is located, so users can ADD or COPY them
+ // during container image build.
+ optional string destinationDir = 2;
+}
+
+// SecretLocalReference contains information that points to the local secret being used
+message SecretLocalReference {
+ // name is the name of the resource in the same namespace being referenced
+ optional string name = 1;
+}
+
+// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point
+message SecretSpec {
+ // secretSource is a reference to the secret
+ optional .k8s.io.api.core.v1.LocalObjectReference secretSource = 1;
+
+ // mountPath is the path at which to mount the secret
+ optional string mountPath = 2;
+}
+
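[Editorial aside, not part of the vendored diff: OptionalNodeSelector above exists purely so a nil value can be told apart from an empty one. A short sketch using the Go alias defined later in this diff:]

package main

import buildv1 "github.com/openshift/api/build/v1"

// nil means "unset": the cluster's default build node selectors may apply.
var unset buildv1.OptionalNodeSelector

// Any non-nil value, even an empty map, means "set": defaults are ignored.
var empty = buildv1.OptionalNodeSelector{}
var pinned = buildv1.OptionalNodeSelector{"kubernetes.io/arch": "amd64"}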
+// SourceBuildStrategy defines input parameters specific to a Source build.
+message SourceBuildStrategy {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ optional .k8s.io.api.core.v1.ObjectReference from = 1;
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ optional .k8s.io.api.core.v1.LocalObjectReference pullSecret = 2;
+
+ // env contains additional environment variables you want to pass into a builder container.
+ repeated .k8s.io.api.core.v1.EnvVar env = 3;
+
+ // scripts is the location of Source scripts
+ optional string scripts = 4;
+
+ // incremental flag forces the Source build to do incremental builds if true.
+ optional bool incremental = 5;
+
+ // forcePull describes if the builder should pull the images from registry prior to building.
+ optional bool forcePull = 6;
+
+ // volumes is a list of input volumes that can be mounted into the build's runtime environment.
+ // Only a subset of Kubernetes Volume sources are supported by builds.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ // +listType=map
+ // +listMapKey=name
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated BuildVolume volumes = 9;
+}
+
+// SourceControlUser defines the identity of a user of source control
+message SourceControlUser {
+ // name of the source control user
+ optional string name = 1;
+
+ // email of the source control user
+ optional string email = 2;
+}
+
+// SourceRevision is the revision or commit information from the source for the build
+message SourceRevision {
+ // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'
+ // +k8s:conversion-gen=false
+ optional string type = 1;
+
+ // git contains information about git-based build source
+ optional GitSourceRevision git = 2;
+}
+
+// SourceStrategyOptions contains extra strategy options for Source builds
+message SourceStrategyOptions {
+ // incremental overrides the source-strategy incremental option in the build config
+ optional bool incremental = 1;
+}
+
+// StageInfo contains details about a build stage.
+message StageInfo {
+ // name is a unique identifier for each build stage that occurs.
+ optional string name = 1;
+
+ // startTime is a timestamp representing the server time when this Stage started.
+ // It is represented in RFC3339 form and is in UTC.
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2;
+
+ // durationMilliseconds identifies how long the stage took
+ // to complete in milliseconds.
+ // Note: the duration of a stage can exceed the sum of the duration of the steps within
+ // the stage as not all actions are accounted for in explicit build steps.
+ optional int64 durationMilliseconds = 3;
+
+ // steps contains details about each step that occurs during a build stage
+ // including start time and duration in milliseconds.
+ repeated StepInfo steps = 4;
+}
+
+// StepInfo contains details about a build step.
+message StepInfo {
+ // name is a unique identifier for each build step.
+ optional string name = 1;
+
+ // startTime is a timestamp representing the server time when this Step started.
+ // It is represented in RFC3339 form and is in UTC.
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2;
+
+ // durationMilliseconds identifies how long the step took
+ // to complete in milliseconds. 
+ optional int64 durationMilliseconds = 3; +} + +// WebHookTrigger is a trigger that gets invoked using a webhook type of post +message WebHookTrigger { + // secret used to validate requests. + // Deprecated: use SecretReference instead. + optional string secret = 1; + + // allowEnv determines whether the webhook can set environment variables; can only + // be set to true for GenericWebHook. + optional bool allowEnv = 2; + + // secretReference is a reference to a secret in the same namespace, + // containing the value to be validated when the webhook is invoked. + // The secret being referenced must contain a key named "WebHookSecretKey", the value + // of which will be checked against the value supplied in the webhook invocation. + optional SecretLocalReference secretReference = 3; +} + diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/legacy.go b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/legacy.go new file mode 100644 index 000000000..a74627d2c --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/legacy.go @@ -0,0 +1,28 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Build{}, + &BuildList{}, + &BuildConfig{}, + &BuildConfigList{}, + &BuildLog{}, + &BuildRequest{}, + &BuildLogOptions{}, + &BinaryBuildRequestOptions{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) 
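+	// Editorial note (not in the upstream file): AddKnownTypes registers every
+	// type listed above under the legacy, group-less GroupVersion ("", "v1"),
+	// which DeprecatedInstallWithoutGroup exposes for clients that predate the
+	// build.openshift.io API group.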
+ return nil
+}
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/register.go b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/register.go
new file mode 100644
index 000000000..16f68ea8c
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/register.go
@@ -0,0 +1,47 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "build.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// addKnownTypes adds types to the API group
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &Build{},
+ &BuildList{},
+ &BuildConfig{},
+ &BuildConfigList{},
+ &BuildLog{},
+ &BuildRequest{},
+ &BuildLogOptions{},
+ &BinaryBuildRequestOptions{},
+ // This is needed for webhooks
+ &corev1.PodProxyOptions{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/types.go b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/types.go
new file mode 100644
index 000000000..b64aacc06
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/types.go
@@ -0,0 +1,1484 @@
+package v1
+
+import (
+ "fmt"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:method=UpdateDetails,verb=update,subresource=details
+// +genclient:method=Clone,verb=create,subresource=clone,input=BuildRequest
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Build encapsulates the inputs needed to produce a new deployable image, as well as
+// the status of the execution and a reference to the Pod which executed the build.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Build struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec is all the inputs used to execute the build.
+ Spec BuildSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // status is the current status of the build.
+ // +optional
+ Status BuildStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
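[Editorial aside, not part of the vendored diff: a minimal sketch of how register.go's Install is typically consumed. The main package is hypothetical, and the import is the canonical upstream module path that this diff vendors under github.com/openshift/api.]

package main

import (
	"fmt"

	buildv1 "github.com/openshift/api/build/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install wires Build, BuildConfig, etc. into the scheme under
	// build.openshift.io/v1 (plus core/v1, via corev1.AddToScheme).
	if err := buildv1.Install(scheme); err != nil {
		panic(err)
	}
	gvks, _, err := scheme.ObjectKinds(&buildv1.Build{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // [build.openshift.io/v1, Kind=Build]
}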
+
+// BuildSpec has the information to represent a build and also additional
+// information about a build
+type BuildSpec struct {
+ // CommonSpec is the information that represents a build
+ CommonSpec `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"`
+
+ // triggeredBy describes which triggers started the most recent update to the
+ // build configuration and contains information about those triggers.
+ TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,2,rep,name=triggeredBy"`
+}
+
+// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type OptionalNodeSelector map[string]string
+
+func (t OptionalNodeSelector) String() string {
+ return fmt.Sprintf("%v", map[string]string(t))
+}
+
+// CommonSpec encapsulates all the inputs necessary to represent a build.
+type CommonSpec struct {
+ // serviceAccount is the name of the ServiceAccount to use to run the pod
+ // created by this build.
+ // The pod will be allowed to use secrets referenced by the ServiceAccount
+ ServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,1,opt,name=serviceAccount"`
+
+ // source describes the SCM in use.
+ Source BuildSource `json:"source,omitempty" protobuf:"bytes,2,opt,name=source"`
+
+ // revision is the information from the source for a specific repo snapshot.
+ // This is optional.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,3,opt,name=revision"`
+
+ // strategy defines how to perform a build.
+ Strategy BuildStrategy `json:"strategy" protobuf:"bytes,4,opt,name=strategy"`
+
+ // output describes the container image the Strategy should produce.
+ Output BuildOutput `json:"output,omitempty" protobuf:"bytes,5,opt,name=output"`
+
+ // resources computes resource requirements to execute the build.
+ Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,6,opt,name=resources"`
+
+ // postCommit is a build hook executed after the build output image is
+ // committed, before it is pushed to a registry.
+ PostCommit BuildPostCommitSpec `json:"postCommit,omitempty" protobuf:"bytes,7,opt,name=postCommit"`
+
+ // completionDeadlineSeconds is an optional duration in seconds, counted from
+ // the time when a build pod gets scheduled in the system, that the build may
+ // be active on a node before the system actively tries to terminate the
+ // build; value must be a positive integer
+ CompletionDeadlineSeconds *int64 `json:"completionDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=completionDeadlineSeconds"`
+
+ // nodeSelector is a selector which must be true for the build pod to fit on a node
+ // If nil, it can be overridden by default build nodeselector values for the cluster.
+ // If set to an empty map or a map with any values, default build nodeselector values
+ // are ignored.
+ // +optional
+ NodeSelector OptionalNodeSelector `json:"nodeSelector" protobuf:"bytes,9,name=nodeSelector"`
+
+ // mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in
+ // the cluster's proxy configuration, into the build. This lets processes within a build trust
+ // components signed by custom PKI certificate authorities, such as private artifact
+ // repositories and HTTPS proxies. 
+ //
+ // When this field is set to true, the contents of `/etc/pki/ca-trust` within the build are
+ // managed by the build container, and any changes to this directory or its subdirectories (for
+ // example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image.
+ MountTrustedCA *bool `json:"mountTrustedCA,omitempty" protobuf:"varint,10,opt,name=mountTrustedCA"`
+}
+
+// BuildTriggerCause holds information about a triggered build. It is used for
+// displaying build trigger data for each build and build configuration in oc
+// describe. It is also used to describe which triggers led to the most recent
+// update in the build configuration.
+type BuildTriggerCause struct {
+ // message is used to store a human-readable message for why the build was
+ // triggered. E.g.: "Manually triggered by user", "Configuration change", etc.
+ Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
+
+ // genericWebHook holds data about a build's generic webhook trigger.
+ GenericWebHook *GenericWebHookCause `json:"genericWebHook,omitempty" protobuf:"bytes,2,opt,name=genericWebHook"`
+
+ // githubWebHook represents data for a GitHub webhook that fired a
+ // specific build.
+ GitHubWebHook *GitHubWebHookCause `json:"githubWebHook,omitempty" protobuf:"bytes,3,opt,name=githubWebHook"`
+
+ // imageChangeBuild stores information about an imagechange event
+ // that triggered a new build.
+ ImageChangeBuild *ImageChangeCause `json:"imageChangeBuild,omitempty" protobuf:"bytes,4,opt,name=imageChangeBuild"`
+
+ // gitlabWebHook represents data for a GitLab webhook that fired a specific
+ // build.
+ GitLabWebHook *GitLabWebHookCause `json:"gitlabWebHook,omitempty" protobuf:"bytes,5,opt,name=gitlabWebHook"`
+
+ // bitbucketWebHook represents data for a Bitbucket webhook that fired a
+ // specific build.
+ BitbucketWebHook *BitbucketWebHookCause `json:"bitbucketWebHook,omitempty" protobuf:"bytes,6,opt,name=bitbucketWebHook"`
+}
+
+// GenericWebHookCause holds information about a generic WebHook that
+// triggered a build.
+type GenericWebHookCause struct {
+ // revision is an optional field that stores the git source revision
+ // information of the generic webhook trigger when it is available.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
+
+ // secret is the obfuscated webhook secret that triggered a build.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+}
+
+// GitHubWebHookCause has information about a GitHub webhook that triggered a
+// build.
+type GitHubWebHookCause struct {
+ // revision is the git revision information of the trigger.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
+
+ // secret is the obfuscated webhook secret that triggered a build.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+}
+
+// CommonWebHookCause factors out the identical format of these webhook
+// causes into a struct so we can share it in the specific causes; it is too late for
+// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.
+type CommonWebHookCause struct {
+ // revision is the git source revision information of the trigger.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
+
+ // secret is the obfuscated webhook secret that triggered a build. 
+ Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` +} + +// GitLabWebHookCause has information about a GitLab webhook that triggered a +// build. +type GitLabWebHookCause struct { + CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` +} + +// BitbucketWebHookCause has information about a Bitbucket webhook that triggered a +// build. +type BitbucketWebHookCause struct { + CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` +} + +// ImageChangeCause contains information about the image that triggered a +// build +type ImageChangeCause struct { + // imageID is the ID of the image that triggered a new build. + ImageID string `json:"imageID,omitempty" protobuf:"bytes,1,opt,name=imageID"` + + // fromRef contains detailed information about an image that triggered a + // build. + FromRef *corev1.ObjectReference `json:"fromRef,omitempty" protobuf:"bytes,2,opt,name=fromRef"` +} + +// BuildStatus contains the status of a build +type BuildStatus struct { + // phase is the point in the build lifecycle. Possible values are + // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". + // +optional + Phase BuildPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=BuildPhase"` + + // cancelled describes if a cancel event was triggered for the build. + // +optional + Cancelled bool `json:"cancelled,omitempty" protobuf:"varint,2,opt,name=cancelled"` + + // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + // +optional + Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason,casttype=StatusReason"` + + // message is a human-readable message indicating details about why the build has this status. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + + // startTimestamp is a timestamp representing the server time when this Build started + // running in a Pod. + // It is represented in RFC3339 form and is in UTC. + // +optional + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty" protobuf:"bytes,5,opt,name=startTimestamp"` + + // completionTimestamp is a timestamp representing the server time when this Build was + // finished, whether that build failed or succeeded. It reflects the time at which + // the Pod running the Build terminated. + // It is represented in RFC3339 form and is in UTC. + // +optional + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty" protobuf:"bytes,6,opt,name=completionTimestamp"` + + // duration contains time.Duration object describing build time. + // +optional + Duration time.Duration `json:"duration,omitempty" protobuf:"varint,7,opt,name=duration,casttype=time.Duration"` + + // outputDockerImageReference contains a reference to the container image that + // will be built by this build. Its value is computed from + // Build.Spec.Output.To, and should include the registry address, so that + // it can be used to push and pull the image. + // +optional + OutputDockerImageReference string `json:"outputDockerImageReference,omitempty" protobuf:"bytes,8,opt,name=outputDockerImageReference"` + + // config is an ObjectReference to the BuildConfig this Build is based on. + // +optional + Config *corev1.ObjectReference `json:"config,omitempty" protobuf:"bytes,9,opt,name=config"` + + // output describes the container image the build has produced. 
+ // +optional
+ Output BuildStatusOutput `json:"output,omitempty" protobuf:"bytes,10,opt,name=output"`
+
+ // stages contains details about each stage that occurs during the build
+ // including start time, duration (in milliseconds), and the steps that
+ // occurred within each stage.
+ // +optional
+ Stages []StageInfo `json:"stages,omitempty" protobuf:"bytes,11,opt,name=stages"`
+
+ // logSnippet is the last few lines of the build log. This value is only set for builds that failed.
+ // +optional
+ LogSnippet string `json:"logSnippet,omitempty" protobuf:"bytes,12,opt,name=logSnippet"`
+
+ // conditions represents the latest available observations of a build's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []BuildCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,13,rep,name=conditions"`
+}
+
+// StageInfo contains details about a build stage.
+type StageInfo struct {
+ // name is a unique identifier for each build stage that occurs.
+ Name StageName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // startTime is a timestamp representing the server time when this Stage started.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+ // durationMilliseconds identifies how long the stage took
+ // to complete in milliseconds.
+ // Note: the duration of a stage can exceed the sum of the duration of the steps within
+ // the stage as not all actions are accounted for in explicit build steps.
+ DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"`
+
+ // steps contains details about each step that occurs during a build stage
+ // including start time and duration in milliseconds.
+ Steps []StepInfo `json:"steps,omitempty" protobuf:"bytes,4,opt,name=steps"`
+}
+
+// StageName is the unique identifier for each build stage.
+type StageName string
+
+// Valid values for StageName
+const (
+ // StageFetchInputs fetches any inputs such as source code.
+ StageFetchInputs StageName = "FetchInputs"
+
+ // StagePullImages pulls any images that are needed such as
+ // base images or input images.
+ StagePullImages StageName = "PullImages"
+
+ // StageBuild performs the steps necessary to build the image.
+ StageBuild StageName = "Build"
+
+ // StagePostCommit executes any post commit steps.
+ StagePostCommit StageName = "PostCommit"
+
+ // StagePushImage pushes the image to the node.
+ StagePushImage StageName = "PushImage"
+)
+
+// StepInfo contains details about a build step.
+type StepInfo struct {
+ // name is a unique identifier for each build step.
+ Name StepName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // startTime is a timestamp representing the server time when this Step started.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+ // durationMilliseconds identifies how long the step took
+ // to complete in milliseconds.
+ DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"`
+}
+
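[Editorial aside, not part of the vendored diff: a sketch of consuming the timing data carried by StageInfo/StepInfo on a finished build. The function and variable names are illustrative only.]

package main

import (
	"fmt"

	buildv1 "github.com/openshift/api/build/v1"
)

// summarizeStages prints how long each recorded stage and step took; note the
// caveat above that a stage's duration can exceed the sum of its steps.
func summarizeStages(b *buildv1.Build) {
	for _, stage := range b.Status.Stages {
		fmt.Printf("%-14s %6dms\n", stage.Name, stage.DurationMilliseconds)
		for _, step := range stage.Steps {
			fmt.Printf("  %-14s %6dms\n", step.Name, step.DurationMilliseconds)
		}
	}
}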
+// StepName is a unique identifier for each build step.
+type StepName string
+
+// Valid values for StepName
+const (
+ // StepExecPostCommitHook executes the buildconfig's post commit hook.
+ StepExecPostCommitHook StepName = "RunPostCommitHook"
+
+ // StepFetchGitSource fetches source code for the build.
+ StepFetchGitSource StepName = "FetchGitSource"
+
+ // StepPullBaseImage pulls a base image for the build.
+ StepPullBaseImage StepName = "PullBaseImage"
+
+ // StepPullInputImage pulls an input image for the build.
+ StepPullInputImage StepName = "PullInputImage"
+
+ // StepPushImage pushes an image to the registry.
+ StepPushImage StepName = "PushImage"
+
+ // StepPushDockerImage pushes a container image to the registry.
+ StepPushDockerImage StepName = "PushDockerImage"
+
+ // StepDockerBuild performs the container image build
+ StepDockerBuild StepName = "DockerBuild"
+)
+
+// BuildPhase represents the status of a build at a point in time.
+type BuildPhase string
+
+// Valid values for BuildPhase.
+const (
+ // BuildPhaseNew is automatically assigned to a newly created build.
+ BuildPhaseNew BuildPhase = "New"
+
+ // BuildPhasePending indicates that a pod name has been assigned and a build is
+ // about to start running.
+ BuildPhasePending BuildPhase = "Pending"
+
+ // BuildPhaseRunning indicates that a pod has been created and a build is running.
+ BuildPhaseRunning BuildPhase = "Running"
+
+ // BuildPhaseComplete indicates that a build has been successful.
+ BuildPhaseComplete BuildPhase = "Complete"
+
+ // BuildPhaseFailed indicates that a build has executed and failed.
+ BuildPhaseFailed BuildPhase = "Failed"
+
+ // BuildPhaseError indicates that an error prevented the build from executing.
+ BuildPhaseError BuildPhase = "Error"
+
+ // BuildPhaseCancelled indicates that a running/pending build was stopped from executing.
+ BuildPhaseCancelled BuildPhase = "Cancelled"
+)
+
+type BuildConditionType string
+
+// BuildCondition describes the state of a build at a certain point.
+type BuildCondition struct {
+ // type of build condition.
+ Type BuildConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildConditionType"`
+ // status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+ // The last time this condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+ // The last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human-readable message indicating details about the transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// StatusReason is a brief CamelCase string that describes a temporary or
+// permanent build error condition, meant for machine parsing and tidy display
+// in the CLI.
+type StatusReason string
+
+// BuildStatusOutput contains the status of the built image.
+type BuildStatusOutput struct {
+ // to describes the status of the built image being pushed to a registry.
+ To *BuildStatusOutputTo `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
+}
+
+// BuildStatusOutputTo describes the status of the built image with regard to
+// the image registry to which it was supposed to be pushed.
+type BuildStatusOutputTo struct {
+ // imageDigest is the digest of the built container image. The digest uniquely
+ // identifies the image in the registry to which it was pushed.
+ //
+ // Please note that this field may not always be set even if the push
+ // completes successfully - e.g. when the registry returns no digest or
+ // returns it in a format that the builder doesn't understand.
+ ImageDigest string `json:"imageDigest,omitempty" protobuf:"bytes,1,opt,name=imageDigest"`
+}
+
+// BuildSourceType is the type of SCM used.
+type BuildSourceType string
+
+// Valid values for BuildSourceType.
+const (
+ // BuildSourceGit instructs a build to use a Git source control repository as the build input.
+ BuildSourceGit BuildSourceType = "Git"
+ // BuildSourceDockerfile uses a Dockerfile as the start of a build
+ BuildSourceDockerfile BuildSourceType = "Dockerfile"
+ // BuildSourceBinary indicates the build will accept a Binary file as input.
+ BuildSourceBinary BuildSourceType = "Binary"
+ // BuildSourceImage indicates the build will accept an image as input
+ BuildSourceImage BuildSourceType = "Image"
+ // BuildSourceNone indicates the build has no predefined input (only valid for Source and Custom Strategies)
+ BuildSourceNone BuildSourceType = "None"
+)
+
+// BuildSource is the SCM used for the build.
+type BuildSource struct {
+ // type of build input to accept
+ // +k8s:conversion-gen=false
+ // +optional
+ Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+ // binary builds accept a binary as their input. The binary is generally assumed to be a tar,
+ // gzipped tar, or zip file depending on the strategy. For container image builds, this is the build
+ // context and an optional Dockerfile may be specified to override any Dockerfile in the
+ // build context. For Source builds, this is assumed to be an archive as described above. For
+ // Source and container image builds, if binary.asFile is set the build will receive a directory with
+ // a single file. contextDir may be used when an archive is provided. Custom builds will
+ // receive this binary as input on STDIN.
+ Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,2,opt,name=binary"`
+
+ // dockerfile is the raw contents of a Dockerfile which should be built. When this option is
+ // specified, the FROM may be modified based on your strategy base image and additional ENV
+ // stanzas from your strategy environment will be added after the FROM, but before the rest
+ // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like
+ // git - in those cases the Git repo will have any innate Dockerfile replaced in the context
+ // dir.
+ Dockerfile *string `json:"dockerfile,omitempty" protobuf:"bytes,3,opt,name=dockerfile"`
+
+ // git contains optional information about git build source
+ Git *GitBuildSource `json:"git,omitempty" protobuf:"bytes,4,opt,name=git"`
+
+ // images describes a set of images to be used to provide source for the build
+ Images []ImageSource `json:"images,omitempty" protobuf:"bytes,5,rep,name=images"`
+
+ // contextDir specifies the sub-directory where the source code for the application exists.
+ // This allows sources to be built from a directory other than the root of
+ // the repository.
+ ContextDir string `json:"contextDir,omitempty" protobuf:"bytes,6,opt,name=contextDir"`
+
+ // sourceSecret is the name of a Secret that would be used for setting
+ // up the authentication for cloning a private repository. 
+ // The secret contains valid credentials for remote repository, where the + // data's key represent the authentication method to be used and value is + // the base64 encoded credentials. Supported auth methods are: ssh-privatekey. + SourceSecret *corev1.LocalObjectReference `json:"sourceSecret,omitempty" protobuf:"bytes,7,opt,name=sourceSecret"` + + // secrets represents a list of secrets and their destinations that will + // be used only for the build. + Secrets []SecretBuildSource `json:"secrets,omitempty" protobuf:"bytes,8,rep,name=secrets"` + + // configMaps represents a list of configMaps and their destinations that will + // be used for the build. + ConfigMaps []ConfigMapBuildSource `json:"configMaps,omitempty" protobuf:"bytes,9,rep,name=configMaps"` +} + +// ImageSource is used to describe build source that will be extracted from an image or used during a +// multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. +// A pull secret can be specified to pull the image from an external registry or override the default +// service account secret if pulling from the internal registry. Image sources can either be used to +// extract content from an image and place it into the build context along with the repository source, +// or used directly during a multi-stage container image build to allow content to be copied without overwriting +// the contents of the repository source (see the 'paths' and 'as' fields). +type ImageSource struct { + // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to + // copy source from. + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + + // A list of image names that this source will be used in place of during a multi-stage container image + // build. For instance, a Dockerfile that uses "COPY --from=nginx:latest" will first check for an image + // source that has "nginx:latest" in this field before attempting to pull directly. If the Dockerfile + // does not reference an image source it is ignored. This field and paths may both be set, in which case + // the contents will be used twice. + // +optional + As []string `json:"as,omitempty" protobuf:"bytes,4,rep,name=as"` + + // paths is a list of source and destination paths to copy from the image. This content will be copied + // into the build context prior to starting the build. If no paths are set, the build context will + // not be altered. + // +optional + Paths []ImageSourcePath `json:"paths,omitempty" protobuf:"bytes,2,rep,name=paths"` + + // pullSecret is a reference to a secret to be used to pull the image from a registry + // If the image is pulled from the OpenShift registry, this field does not need to be set. + PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,3,opt,name=pullSecret"` +} + +// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory. +type ImageSourcePath struct { + // sourcePath is the absolute path of the file or directory inside the image to + // copy to the build directory. If the source path ends in /. then the content of + // the directory will be copied, but the directory itself will not be created at the + // destination. + SourcePath string `json:"sourcePath" protobuf:"bytes,1,opt,name=sourcePath"` + + // destinationDir is the relative directory within the build directory + // where files copied from the image are placed. 
+ DestinationDir string `json:"destinationDir" protobuf:"bytes,2,opt,name=destinationDir"` +} + +// SecretBuildSource describes a secret and its destination directory that will be +// used only at the build time. The content of the secret referenced here will +// be copied into the destination directory instead of mounting. +type SecretBuildSource struct { + // secret is a reference to an existing secret that you want to use in your + // build. + Secret corev1.LocalObjectReference `json:"secret" protobuf:"bytes,1,opt,name=secret"` + + // destinationDir is the directory where the files from the secret should be + // available for the build time. + // For the Source build strategy, these will be injected into a container + // where the assemble script runs. Later, when the script finishes, all files + // injected will be truncated to zero length. + // For the container image build strategy, these will be copied into the build + // directory, where the Dockerfile is located, so users can ADD or COPY them + // during container image build. + DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"` +} + +// ConfigMapBuildSource describes a configmap and its destination directory that will be +// used only at the build time. The content of the configmap referenced here will +// be copied into the destination directory instead of mounting. +type ConfigMapBuildSource struct { + // configMap is a reference to an existing configmap that you want to use in your + // build. + ConfigMap corev1.LocalObjectReference `json:"configMap" protobuf:"bytes,1,opt,name=configMap"` + + // destinationDir is the directory where the files from the configmap should be + // available for the build time. + // For the Source build strategy, these will be injected into a container + // where the assemble script runs. + // For the container image build strategy, these will be copied into the build + // directory, where the Dockerfile is located, so users can ADD or COPY them + // during container image build. + DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"` +} + +// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, +// where the file will be extracted and used as the build source. +type BinaryBuildSource struct { + // asFile indicates that the provided binary input should be considered a single file + // within the build input. For example, specifying "webapp.war" would place the provided + // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build + // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. + // The custom strategy receives this binary as standard input. This filename may not + // contain slashes or be '..' or '.'. 
+ AsFile string `json:"asFile,omitempty" protobuf:"bytes,1,opt,name=asFile"` +} + +// SourceRevision is the revision or commit information from the source for the build +type SourceRevision struct { + // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images' + // +k8s:conversion-gen=false + Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"` + + // git contains information about git-based build source + Git *GitSourceRevision `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"` +} + +// GitSourceRevision is the commit information from a git source for a build +type GitSourceRevision struct { + // commit is the commit hash identifying a specific commit + Commit string `json:"commit,omitempty" protobuf:"bytes,1,opt,name=commit"` + + // author is the author of a specific commit + Author SourceControlUser `json:"author,omitempty" protobuf:"bytes,2,opt,name=author"` + + // committer is the committer of a specific commit + Committer SourceControlUser `json:"committer,omitempty" protobuf:"bytes,3,opt,name=committer"` + + // message is the description of a specific commit + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` +} + +// ProxyConfig defines what proxies to use for an operation +type ProxyConfig struct { + // httpProxy is a proxy used to reach the git repository over http + HTTPProxy *string `json:"httpProxy,omitempty" protobuf:"bytes,3,opt,name=httpProxy"` + + // httpsProxy is a proxy used to reach the git repository over https + HTTPSProxy *string `json:"httpsProxy,omitempty" protobuf:"bytes,4,opt,name=httpsProxy"` + + // noProxy is the list of domains for which the proxy should not be used + NoProxy *string `json:"noProxy,omitempty" protobuf:"bytes,5,opt,name=noProxy"` +} + +// GitBuildSource defines the parameters of a Git SCM +type GitBuildSource struct { + // uri points to the source that will be built. The structure of the source + // will depend on the type of build to run + URI string `json:"uri" protobuf:"bytes,1,opt,name=uri"` + + // ref is the branch/tag/ref to build. + Ref string `json:"ref,omitempty" protobuf:"bytes,2,opt,name=ref"` + + // proxyConfig defines the proxies to use for the git clone operation. Values + // not set here are inherited from cluster-wide build git proxy settings. + ProxyConfig `json:",inline" protobuf:"bytes,3,opt,name=proxyConfig"` +} + +// SourceControlUser defines the identity of a user of source control +type SourceControlUser struct { + // name of the source control user + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // email of the source control user + Email string `json:"email,omitempty" protobuf:"bytes,2,opt,name=email"` +} + +// BuildStrategy contains the details of how to perform a build. +type BuildStrategy struct { + // type is the kind of build strategy. + // +k8s:conversion-gen=false + // +optional + Type BuildStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildStrategyType"` + + // dockerStrategy holds the parameters to the container image build strategy. + DockerStrategy *DockerBuildStrategy `json:"dockerStrategy,omitempty" protobuf:"bytes,2,opt,name=dockerStrategy"` + + // sourceStrategy holds the parameters to the Source build strategy. 
+ SourceStrategy *SourceBuildStrategy `json:"sourceStrategy,omitempty" protobuf:"bytes,3,opt,name=sourceStrategy"`
+
+ // customStrategy holds the parameters to the Custom build strategy.
+ CustomStrategy *CustomBuildStrategy `json:"customStrategy,omitempty" protobuf:"bytes,4,opt,name=customStrategy"`
+
+ // jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
+ // Deprecated: use OpenShift Pipelines
+ JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy `json:"jenkinsPipelineStrategy,omitempty" protobuf:"bytes,5,opt,name=jenkinsPipelineStrategy"`
+}
+
+// BuildStrategyType describes a particular way of performing a build.
+type BuildStrategyType string
+
+// Valid values for BuildStrategyType.
+const (
+ // DockerBuildStrategyType performs builds using a Dockerfile.
+ DockerBuildStrategyType BuildStrategyType = "Docker"
+
+ // SourceBuildStrategyType performs builds using Source-to-Image with a Git repository
+ // and a builder image.
+ SourceBuildStrategyType BuildStrategyType = "Source"
+
+ // CustomBuildStrategyType performs builds using a custom builder container image.
+ CustomBuildStrategyType BuildStrategyType = "Custom"
+
+ // JenkinsPipelineBuildStrategyType indicates the build will run via Jenkins Pipeline.
+ JenkinsPipelineBuildStrategyType BuildStrategyType = "JenkinsPipeline"
+)
+
+// CustomBuildStrategy defines input parameters specific to a Custom build.
+type CustomBuildStrategy struct {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+ // exposeDockerSocket will allow running Docker commands (and build container images) from
+ // inside the container.
+ // TODO: Allow admins to enforce 'false' for this option
+ ExposeDockerSocket bool `json:"exposeDockerSocket,omitempty" protobuf:"varint,4,opt,name=exposeDockerSocket"`
+
+ // forcePull describes if the controller should configure the build pod to always pull the images
+ // for the builder or only pull if they are not present locally
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
+
+ // secrets is a list of additional secrets that will be included in the build pod
+ Secrets []SecretSpec `json:"secrets,omitempty" protobuf:"bytes,6,rep,name=secrets"`
+
+ // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder
+ BuildAPIVersion string `json:"buildAPIVersion,omitempty" protobuf:"bytes,7,opt,name=buildAPIVersion"`
+}
+
+// ImageOptimizationPolicy describes what optimizations the builder can perform when building images.
+type ImageOptimizationPolicy string
+
+const (
+ // ImageOptimizationNone will generate a canonical container image as produced by the
+ // `container image build` command.
+ ImageOptimizationNone ImageOptimizationPolicy = "None"
+
+ // ImageOptimizationSkipLayers is an experimental policy and will avoid creating
+ // unique layers for each dockerfile line, resulting in smaller images and saving time
+ // during creation. Some Dockerfile syntax is not fully supported - content added to
+ // a VOLUME by an earlier layer may have incorrect uid, gid, and filesystem permissions.
+ // If an unsupported setting is detected, the build will fail.
+ ImageOptimizationSkipLayers ImageOptimizationPolicy = "SkipLayers"
+
+ // ImageOptimizationSkipLayersAndWarn is the same as SkipLayers, but will only
+ // warn in the build output instead of failing when unsupported syntax is detected. This
+ // policy is experimental.
+ ImageOptimizationSkipLayersAndWarn ImageOptimizationPolicy = "SkipLayersAndWarn"
+)
+
+// DockerBuildStrategy defines input parameters specific to container image build.
+type DockerBuildStrategy struct {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides
+ // the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds,
+ // this will replace the image in the last FROM directive of the file.
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // noCache if set to true indicates that the container image build must be executed with the
+ // --no-cache=true flag
+ NoCache bool `json:"noCache,omitempty" protobuf:"varint,3,opt,name=noCache"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,4,rep,name=env"`
+
+ // forcePull describes if the builder should pull the images from registry prior to building.
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
+
+ // dockerfilePath is the path of the Dockerfile that will be used to build the container image,
+ // relative to the root of the context (contextDir).
+ // Defaults to `Dockerfile` if unset.
+ DockerfilePath string `json:"dockerfilePath,omitempty" protobuf:"bytes,6,opt,name=dockerfilePath"`
+
+ // buildArgs contains build arguments that will be resolved in the Dockerfile. See
+ // https://docs.docker.com/engine/reference/builder/#/arg for more details.
+ // NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field
+ // are ignored.
+ BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty" protobuf:"bytes,7,rep,name=buildArgs"`
+
+ // imageOptimizationPolicy describes what optimizations the system can use when building images
+ // to reduce the final size or time spent building the image. The default policy is 'None' which
+ // means the final build image will be equivalent to an image created by the container image build API.
+ // The experimental policy 'SkipLayers' will avoid committing new layers in between each
+ // image step, and will fail if the Dockerfile cannot provide compatibility with the 'None'
+ // policy. An additional experimental policy 'SkipLayersAndWarn' is the same as
+ // 'SkipLayers' but simply warns if compatibility cannot be preserved.
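+ //
+ // A sketch of opting in to the experimental policy (shape only; whether it is
+ // appropriate depends on the Dockerfile):
+ //
+ //	policy := ImageOptimizationSkipLayers
+ //	strategy := DockerBuildStrategy{ImageOptimizationPolicy: &policy}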
+ ImageOptimizationPolicy *ImageOptimizationPolicy `json:"imageOptimizationPolicy,omitempty" protobuf:"bytes,8,opt,name=imageOptimizationPolicy,casttype=ImageOptimizationPolicy"`
+
+ // volumes is a list of input volumes that can be mounted into the build's runtime environment.
+ // Only a subset of Kubernetes Volume sources are supported by builds.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ // +listType=map
+ // +listMapKey=name
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Volumes []BuildVolume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,opt,name=volumes"`
+}
+
+// SourceBuildStrategy defines input parameters specific to a Source build.
+type SourceBuildStrategy struct {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+ // scripts is the location of Source scripts
+ Scripts string `json:"scripts,omitempty" protobuf:"bytes,4,opt,name=scripts"`
+
+ // incremental flag forces the Source build to do incremental builds if true.
+ Incremental *bool `json:"incremental,omitempty" protobuf:"varint,5,opt,name=incremental"`
+
+ // forcePull describes if the builder should pull the images from registry prior to building.
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,6,opt,name=forcePull"`
+
+ // deprecated json field, do not reuse: runtimeImage
+ // +k8s:protobuf-deprecated=runtimeImage,7
+
+ // deprecated json field, do not reuse: runtimeArtifacts
+ // +k8s:protobuf-deprecated=runtimeArtifacts,8
+
+ // volumes is a list of input volumes that can be mounted into the build's runtime environment.
+ // Only a subset of Kubernetes Volume sources are supported by builds.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ // +listType=map
+ // +listMapKey=name
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Volumes []BuildVolume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,opt,name=volumes"`
+}
+
+// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
+// Deprecated: use OpenShift Pipelines
+type JenkinsPipelineBuildStrategy struct {
+ // jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
+ // relative to the root of the context (contextDir). If neither JenkinsfilePath nor Jenkinsfile is
+ // specified, this defaults to Jenkinsfile in the root of the specified contextDir.
+ JenkinsfilePath string `json:"jenkinsfilePath,omitempty" protobuf:"bytes,1,opt,name=jenkinsfilePath"`
+
+ // jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
+ Jenkinsfile string `json:"jenkinsfile,omitempty" protobuf:"bytes,2,opt,name=jenkinsfile"`
+
+ // env contains additional environment variables you want to pass into a build pipeline.
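+ //
+ // For example (name and value are illustrative):
+ //
+ //	Env: []corev1.EnvVar{{Name: "DEBUG", Value: "true"}}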
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+}
+
+// A BuildPostCommitSpec holds a build post commit hook specification. The hook
+// executes a command in a temporary container running the build output image,
+// immediately after the last layer of the image is committed and before the
+// image is pushed to a registry. The command is executed with the current
+// working directory ($PWD) set to the image's WORKDIR.
+//
+// The build will be marked as failed if the hook execution fails. It will fail
+// if the script or command returns a non-zero exit code, or if there is any
+// other error related to starting the temporary container.
+//
+// There are five different ways to configure the hook. As an example, all forms
+// below are equivalent and will execute `rake test --verbose`.
+//
+// 1. Shell script:
+//
+// "postCommit": {
+// "script": "rake test --verbose"
+// }
+//
+// The above is a convenient form which is equivalent to:
+//
+// "postCommit": {
+// "command": ["/bin/sh", "-ic"],
+// "args": ["rake test --verbose"]
+// }
+//
+// 2. A command as the image entrypoint:
+//
+// "postCommit": {
+// "command": ["rake", "test", "--verbose"]
+// }
+//
+// Command overrides the image entrypoint in the exec form, as documented in
+// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint.
+//
+// 3. Pass arguments to the default entrypoint:
+//
+// "postCommit": {
+// "args": ["rake", "test", "--verbose"]
+// }
+//
+// This form is only useful if the image entrypoint can handle arguments.
+//
+// 4. Shell script with arguments:
+//
+// "postCommit": {
+// "script": "rake test $1",
+// "args": ["--verbose"]
+// }
+//
+// This form is useful if you need to pass arguments that would otherwise be
+// hard to quote properly in the shell script. In the script, $0 will be
+// "/bin/sh" and $1, $2, etc., are the positional arguments from Args.
+//
+// 5. Command with arguments:
+//
+// "postCommit": {
+// "command": ["rake", "test"],
+// "args": ["--verbose"]
+// }
+//
+// This form is equivalent to appending the arguments to the Command slice.
+//
+// It is invalid to provide both Script and Command simultaneously. If none of
+// the fields are specified, the hook is not executed.
+type BuildPostCommitSpec struct {
+ // command is the command to run. It may not be specified with Script.
+ // This might be needed if the image doesn't have `/bin/sh`, or if you
+ // do not want to use a shell. In all other cases, using Script might be
+ // more convenient.
+ Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
+ // args is a list of arguments that are provided to either Command,
+ // Script or the container image's default entrypoint. The arguments are
+ // placed immediately after the command to be run.
+ Args []string `json:"args,omitempty" protobuf:"bytes,2,rep,name=args"`
+ // script is a shell script to be run with `/bin/sh -ic`. It may not be
+ // specified with Command. Use Script when a shell script is appropriate
+ // to execute the post build hook, for example for running unit tests
+ // with `rake test`. If you need control over the image entrypoint, or
+ // if the image does not have `/bin/sh`, use Command and/or Args.
+ // The `-i` flag is needed to support CentOS and RHEL images that use
+ // Software Collections (SCL), in order to have the appropriate
+ // collections enabled in the shell. E.g., in the Ruby image, this is
+ // necessary to make `ruby`, `bundle` and other binaries available in
+ // the PATH.
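+ //
+ // As a sketch, form 4 above expressed as a struct literal:
+ //
+ //	BuildPostCommitSpec{Script: "rake test $1", Args: []string{"--verbose"}}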
+ Script string `json:"script,omitempty" protobuf:"bytes,3,opt,name=script"`
+}
+
+// BuildOutput is input to a build strategy and describes the container image that the strategy
+// should produce.
+type BuildOutput struct {
+ // to defines an optional location to push the output of this build to.
+ // Kind must be one of 'ImageStreamTag' or 'DockerImage'.
+ // This value will be used to look up a container image repository to push to.
+ // In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of
+ // the build unless Namespace is specified.
+ To *corev1.ObjectReference `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
+
+ // pushSecret is the name of a Secret that would be used for setting
+ // up the authentication for executing the Docker push to an authentication-enabled
+ // Docker registry (or Docker Hub).
+ PushSecret *corev1.LocalObjectReference `json:"pushSecret,omitempty" protobuf:"bytes,2,opt,name=pushSecret"`
+
+ // imageLabels define a list of labels that are applied to the resulting image. If there
+ // are multiple labels with the same name then the last one in the list is used.
+ ImageLabels []ImageLabel `json:"imageLabels,omitempty" protobuf:"bytes,3,rep,name=imageLabels"`
+}
+
+// ImageLabel represents a label applied to the resulting image.
+type ImageLabel struct {
+ // name defines the name of the label. It must have non-zero length.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // value defines the literal value of the label.
+ Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
+
+// +genclient
+// +genclient:method=Instantiate,verb=create,subresource=instantiate,input=BuildRequest,result=Build
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the "output" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be created.
+//
+// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec holds all the input necessary to produce a new build, and the conditions under which
+ // to trigger them.
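+ //
+ // A minimal sketch (the run policy choice is illustrative):
+ //
+ //	Spec: BuildConfigSpec{RunPolicy: BuildRunPolicySerial}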
+ Spec BuildConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // status holds any relevant information about a build config
+ // +optional
+ Status BuildConfigStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// BuildConfigSpec describes when and how builds are created
+type BuildConfigSpec struct {
+
+ // triggers determine how new Builds can be launched from a BuildConfig. If
+ // no triggers are defined, a new build can only occur as a result of an
+ // explicit client build creation.
+ // +optional
+ Triggers []BuildTriggerPolicy `json:"triggers,omitempty" protobuf:"bytes,1,rep,name=triggers"`
+
+ // runPolicy describes how the new build created from this build
+ // configuration will be scheduled for execution.
+ // This is optional; if not specified, it defaults to "Serial".
+ RunPolicy BuildRunPolicy `json:"runPolicy,omitempty" protobuf:"bytes,2,opt,name=runPolicy,casttype=BuildRunPolicy"`
+
+ // CommonSpec is the desired build specification
+ CommonSpec `json:",inline" protobuf:"bytes,3,opt,name=commonSpec"`
+
+ // successfulBuildsHistoryLimit is the number of old successful builds to retain.
+ // When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set.
+ // If removed after the BuildConfig has been created, all successful builds are retained.
+ SuccessfulBuildsHistoryLimit *int32 `json:"successfulBuildsHistoryLimit,omitempty" protobuf:"varint,4,opt,name=successfulBuildsHistoryLimit"`
+
+ // failedBuildsHistoryLimit is the number of old failed builds to retain.
+ // When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set.
+ // If removed after the BuildConfig has been created, all failed builds are retained.
+ FailedBuildsHistoryLimit *int32 `json:"failedBuildsHistoryLimit,omitempty" protobuf:"varint,5,opt,name=failedBuildsHistoryLimit"`
+}
+
+// BuildRunPolicy defines the behaviour of how the new builds are executed
+// from the existing build configuration.
+type BuildRunPolicy string
+
+const (
+ // BuildRunPolicyParallel schedules new builds immediately after they are
+ // created. Builds will be executed in parallel.
+ BuildRunPolicyParallel BuildRunPolicy = "Parallel"
+
+ // BuildRunPolicySerial schedules new builds to execute in a sequence as
+ // they are created. Every build gets queued up and will execute when the
+ // previous build completes. This is the default policy.
+ BuildRunPolicySerial BuildRunPolicy = "Serial"
+
+ // BuildRunPolicySerialLatestOnly schedules only the latest build to execute,
+ // cancelling all previously queued builds.
+ BuildRunPolicySerialLatestOnly BuildRunPolicy = "SerialLatestOnly"
+)
+
+// BuildConfigStatus contains current state of the build config object.
+type BuildConfigStatus struct {
+ // lastVersion is the number of the last triggered build.
+ // +optional
+ LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"`
+
+ // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec,
+ // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry
+ // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.
+ // +optional
+ ImageChangeTriggers []ImageChangeTriggerStatus `json:"imageChangeTriggers,omitempty" protobuf:"bytes,2,rep,name=imageChangeTriggers"`
+}
+
+// SecretLocalReference contains information that points to the local secret being used
+type SecretLocalReference struct {
+ // name is the name of the resource in the same namespace being referenced
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+}
+
+// WebHookTrigger is a trigger that is invoked by an HTTP POST to a webhook endpoint
+type WebHookTrigger struct {
+ // secret used to validate requests.
+ // Deprecated: use SecretReference instead.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
+
+ // allowEnv determines whether the webhook can set environment variables; can only
+ // be set to true for GenericWebHook.
+ AllowEnv bool `json:"allowEnv,omitempty" protobuf:"varint,2,opt,name=allowEnv"`
+
+ // secretReference is a reference to a secret in the same namespace,
+ // containing the value to be validated when the webhook is invoked.
+ // The secret being referenced must contain a key named "WebHookSecretKey", the value
+ // of which will be checked against the value supplied in the webhook invocation.
+ SecretReference *SecretLocalReference `json:"secretReference,omitempty" protobuf:"bytes,3,opt,name=secretReference"`
+}
+
+// ImageChangeTrigger allows builds to be triggered when an ImageStream changes
+type ImageChangeTrigger struct {
+ // lastTriggeredImageID is used internally by the ImageChangeController to save the last
+ // used image ID for a build.
+ // This field is deprecated and will be removed in a future release.
+ // Deprecated
+ LastTriggeredImageID string `json:"lastTriggeredImageID,omitempty" protobuf:"bytes,1,opt,name=lastTriggeredImageID"`
+
+ // from is a reference to an ImageStreamTag that will trigger a build when updated.
+ // It is optional. If no From is specified, the From image from the build strategy
+ // will be used. Only one ImageChangeTrigger with an empty From reference is allowed in
+ // a build configuration.
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
+
+ // paused is true if this trigger is temporarily disabled. Optional.
+ Paused bool `json:"paused,omitempty" protobuf:"varint,3,opt,name=paused"`
+}
+
+// ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name.
+type ImageStreamTagReference struct {
+ // namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+
+ // name is the name of the ImageStreamTag for an ImageChangeTrigger
+ Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
+}
+
+// ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy
+// specified in the BuildConfigSpec.Triggers struct.
+type ImageChangeTriggerStatus struct {
+ // lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started.
+ // The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started.
+ LastTriggeredImageID string `json:"lastTriggeredImageID,omitempty" protobuf:"bytes,1,opt,name=lastTriggeredImageID"`
+
+ // from is the ImageStreamTag that is the source of the trigger.
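+ //
+ // For example (namespace and tag are illustrative):
+ //
+ //	From: ImageStreamTagReference{Namespace: "myproject", Name: "ruby:latest"}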
+ From ImageStreamTagReference `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
+
+ // lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start.
+ // This field is only updated when this trigger specifically started a Build.
+ LastTriggerTime metav1.Time `json:"lastTriggerTime,omitempty" protobuf:"bytes,3,opt,name=lastTriggerTime"`
+}
+
+// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.
+type BuildTriggerPolicy struct {
+ // type is the type of build trigger. Valid values:
+ //
+ // - GitHub
+ // GitHubWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitHub webhook invocations
+ //
+ // - Generic
+ // GenericWebHookBuildTriggerType represents a trigger that launches builds on
+ // generic webhook invocations
+ //
+ // - GitLab
+ // GitLabWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitLab webhook invocations
+ //
+ // - Bitbucket
+ // BitbucketWebHookBuildTriggerType represents a trigger that launches builds on
+ // Bitbucket webhook invocations
+ //
+ // - ImageChange
+ // ImageChangeBuildTriggerType represents a trigger that launches builds on
+ // availability of a new version of an image
+ //
+ // - ConfigChange
+ // ConfigChangeBuildTriggerType will trigger a build on an initial build config creation
+ // WARNING: In the future the behavior will change to trigger a build on any config change
+ Type BuildTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildTriggerType"`
+
+ // github contains the parameters for a GitHub webhook type of trigger
+ GitHubWebHook *WebHookTrigger `json:"github,omitempty" protobuf:"bytes,2,opt,name=github"`
+
+ // generic contains the parameters for a Generic webhook type of trigger
+ GenericWebHook *WebHookTrigger `json:"generic,omitempty" protobuf:"bytes,3,opt,name=generic"`
+
+ // imageChange contains parameters for an ImageChange type of trigger
+ ImageChange *ImageChangeTrigger `json:"imageChange,omitempty" protobuf:"bytes,4,opt,name=imageChange"`
+
+ // gitlab contains the parameters for a GitLab webhook type of trigger
+ GitLabWebHook *WebHookTrigger `json:"gitlab,omitempty" protobuf:"bytes,5,opt,name=gitlab"`
+
+ // bitbucket contains the parameters for a Bitbucket webhook type of
+ // trigger
+ BitbucketWebHook *WebHookTrigger `json:"bitbucket,omitempty" protobuf:"bytes,6,opt,name=bitbucket"`
+}
+
+// BuildTriggerType refers to a specific BuildTriggerPolicy implementation.
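+//
+// As an illustration, a GitHub webhook trigger policy might be populated as
+// follows (the secret name is hypothetical):
+//
+//	BuildTriggerPolicy{
+//		Type: GitHubWebHookBuildTriggerType,
+//		GitHubWebHook: &WebHookTrigger{
+//			SecretReference: &SecretLocalReference{Name: "github-webhook-secret"},
+//		},
+//	}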
+type BuildTriggerType string
+
+const (
+ // GitHubWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitHub webhook invocations
+ GitHubWebHookBuildTriggerType BuildTriggerType = "GitHub"
+ GitHubWebHookBuildTriggerTypeDeprecated BuildTriggerType = "github"
+
+ // GenericWebHookBuildTriggerType represents a trigger that launches builds on
+ // generic webhook invocations
+ GenericWebHookBuildTriggerType BuildTriggerType = "Generic"
+ GenericWebHookBuildTriggerTypeDeprecated BuildTriggerType = "generic"
+
+ // GitLabWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitLab webhook invocations
+ GitLabWebHookBuildTriggerType BuildTriggerType = "GitLab"
+
+ // BitbucketWebHookBuildTriggerType represents a trigger that launches builds on
+ // Bitbucket webhook invocations
+ BitbucketWebHookBuildTriggerType BuildTriggerType = "Bitbucket"
+
+ // ImageChangeBuildTriggerType represents a trigger that launches builds on
+ // availability of a new version of an image
+ ImageChangeBuildTriggerType BuildTriggerType = "ImageChange"
+ ImageChangeBuildTriggerTypeDeprecated BuildTriggerType = "imageChange"
+
+ // ConfigChangeBuildTriggerType will trigger a build on an initial build config creation
+ // WARNING: In the future the behavior will change to trigger a build on any config change
+ ConfigChangeBuildTriggerType BuildTriggerType = "ConfigChange"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildList is a collection of Builds.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of builds
+ Items []Build `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildConfigList is a collection of BuildConfigs.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of build configs
+ Items []BuildConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// GenericWebHookEvent is the payload expected for a generic webhook post
+type GenericWebHookEvent struct {
+ // type is the type of source repository
+ // +k8s:conversion-gen=false
+ Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+ // git is the git information if the Type is BuildSourceGit
+ Git *GitInfo `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ // ValueFrom is not supported.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+ // dockerStrategyOptions contains additional docker-strategy specific options for the build
+ DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,4,opt,name=dockerStrategyOptions"`
+}
+
+// GitInfo is the aggregated git information for a generic webhook post
+type GitInfo struct {
+ GitBuildSource `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
+ GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
+
+ // refs is a list of GitRefs for the provided repo - generally sent
+ // when used from a post-receive hook. This field is optional and is
+ // used when sending multiple refs.
+ Refs []GitRefInfo `json:"refs" protobuf:"bytes,3,rep,name=refs"`
+}
+
+// GitRefInfo is a single ref
+type GitRefInfo struct {
+ GitBuildSource `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
+ GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildLog is the (unused) resource associated with the build log redirector
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildLog struct {
+ metav1.TypeMeta `json:",inline"`
+}
+
+// DockerStrategyOptions contains extra strategy options for container image builds
+type DockerStrategyOptions struct {
+ // buildArgs contains any build arguments that are to be passed to Docker. See
+ // https://docs.docker.com/engine/reference/builder/#/arg for more details.
+ BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty" protobuf:"bytes,1,rep,name=buildArgs"`
+
+ // noCache overrides the docker-strategy noCache option in the build config
+ NoCache *bool `json:"noCache,omitempty" protobuf:"varint,2,opt,name=noCache"`
+}
+
+// SourceStrategyOptions contains extra strategy options for Source builds
+type SourceStrategyOptions struct {
+ // incremental overrides the source-strategy incremental option in the build config
+ Incremental *bool `json:"incremental,omitempty" protobuf:"varint,1,opt,name=incremental"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildRequest is the resource used to pass parameters to the build generator
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildRequest struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // revision is the information from the source for a specific repo snapshot.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
+
+ // triggeredByImage is the Image that triggered this build.
+ TriggeredByImage *corev1.ObjectReference `json:"triggeredByImage,omitempty" protobuf:"bytes,3,opt,name=triggeredByImage"`
+
+ // from is the reference to the ImageStreamTag that triggered the build.
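+ //
+ // For example (the reference is illustrative):
+ //
+ //	From: &corev1.ObjectReference{Kind: "ImageStreamTag", Namespace: "myproject", Name: "ruby:latest"}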
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,4,opt,name=from"`
+
+ // binary indicates a request to build from a binary provided to the builder
+ Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,5,opt,name=binary"`
+
+ // lastVersion (optional) is the LastVersion of the BuildConfig that was used
+ // to generate the build. If the BuildConfig in the generator doesn't match, a build will
+ // not be generated.
+ LastVersion *int64 `json:"lastVersion,omitempty" protobuf:"varint,6,opt,name=lastVersion"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,7,rep,name=env"`
+
+ // triggeredBy describes which triggers started the most recent update to the
+ // build configuration and contains information about those triggers.
+ TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,8,rep,name=triggeredBy"`
+
+ // dockerStrategyOptions contains additional docker-strategy specific options for the build
+ DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,9,opt,name=dockerStrategyOptions"`
+
+ // sourceStrategyOptions contains additional source-strategy specific options for the build
+ SourceStrategyOptions *SourceStrategyOptions `json:"sourceStrategyOptions,omitempty" protobuf:"bytes,10,opt,name=sourceStrategyOptions"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BinaryBuildRequestOptions are the options required to fully specify a binary build request
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BinaryBuildRequestOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // asFile determines if the binary should be created as a file within the source rather than extracted as an archive
+ AsFile string `json:"asFile,omitempty" protobuf:"bytes,2,opt,name=asFile"`
+
+ // TODO: Improve map[string][]string conversion so we can handle nested objects
+
+ // revision.commit is the value identifying a specific commit
+ Commit string `json:"revision.commit,omitempty" protobuf:"bytes,3,opt,name=revisionCommit"`
+
+ // revision.message is the description of a specific commit
+ Message string `json:"revision.message,omitempty" protobuf:"bytes,4,opt,name=revisionMessage"`
+
+ // revision.authorName of the source control user
+ AuthorName string `json:"revision.authorName,omitempty" protobuf:"bytes,5,opt,name=revisionAuthorName"`
+
+ // revision.authorEmail of the source control user
+ AuthorEmail string `json:"revision.authorEmail,omitempty" protobuf:"bytes,6,opt,name=revisionAuthorEmail"`
+
+ // revision.committerName of the source control user
+ CommitterName string `json:"revision.committerName,omitempty" protobuf:"bytes,7,opt,name=revisionCommitterName"`
+
+ // revision.committerEmail of the source control user
+ CommitterEmail string `json:"revision.committerEmail,omitempty" protobuf:"bytes,8,opt,name=revisionCommitterEmail"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildLogOptions is the REST options for a build log
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildLogOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // container for which to stream logs. Defaults to the only container if there is one container in the pod.
+ Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
+ // follow if true indicates that the build log should be streamed until
+ // the build terminates.
+ Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
+ // previous returns previous build logs. Defaults to false.
+ Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
+ // sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
+ // sinceTime is an RFC3339 timestamp from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
+ // timestamps, if true, adds an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+ // of log output. Defaults to false.
+ Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
+ // tailLines, if set, is the number of lines from the end of the logs to show. If not specified,
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime.
+ TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
+ // limitBytes, if set, is the number of bytes to read from the server before terminating the
+ // log output. This may not display a complete final line of logging, and may return
+ // slightly more or slightly less than the specified limit.
+ LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
+
+ // nowait if true causes the call to return immediately even if the build
+ // is not available yet. Otherwise the server will wait until the build has started.
+ // TODO: Fix the tag to 'noWait' in v2
+ NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"`
+
+ // version of the build for which to view logs.
+ Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"`
+
+ // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
+ // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver
+ // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
+ // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the
+ // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept
+ // the actual log data coming from the real kubelet).
+ // +optional
+ InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,11,opt,name=insecureSkipTLSVerifyBackend"`
+}
+
+// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point
+type SecretSpec struct {
+ // secretSource is a reference to the secret
+ SecretSource corev1.LocalObjectReference `json:"secretSource" protobuf:"bytes,1,opt,name=secretSource"`
+
+ // mountPath is the path at which to mount the secret
+ MountPath string `json:"mountPath" protobuf:"bytes,2,opt,name=mountPath"`
+}
+
+// BuildVolume describes a volume that is made available to build pods,
+// such that it can be mounted into buildah's runtime environment.
+// Only a subset of Kubernetes Volume sources are supported.
+type BuildVolume struct {
+ // name is a unique identifier for this BuildVolume.
+ // It must conform to the Kubernetes DNS label standard and be unique within the pod.
+ // Names that collide with those added by the build controller will result in a
+ // failed build with an error message detailing which name caused the error.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ // +required
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // source represents the location and type of the mounted volume.
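+ //
+ // A sketch of a secret-backed build volume (names and paths are hypothetical):
+ //
+ //	BuildVolume{
+ //		Name:   "my-certs",
+ //		Source: BuildVolumeSource{
+ //			Type:   BuildVolumeSourceTypeSecret,
+ //			Secret: &corev1.SecretVolumeSource{SecretName: "my-certs"},
+ //		},
+ //		Mounts: []BuildVolumeMount{{DestinationPath: "/etc/pki/my-certs"}},
+ //	}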
+ // +required
+ Source BuildVolumeSource `json:"source" protobuf:"bytes,2,opt,name=source"`
+
+ // mounts represents the location of the volume in the image build container
+ // +required
+ // +listType=map
+ // +listMapKey=destinationPath
+ // +patchMergeKey=destinationPath
+ // +patchStrategy=merge
+ Mounts []BuildVolumeMount `json:"mounts" patchStrategy:"merge" patchMergeKey:"destinationPath" protobuf:"bytes,3,opt,name=mounts"`
+}
+
+// BuildVolumeSourceType represents a build volume source type
+type BuildVolumeSourceType string
+
+const (
+ // BuildVolumeSourceTypeSecret is the Secret build source volume type
+ BuildVolumeSourceTypeSecret BuildVolumeSourceType = "Secret"
+
+ // BuildVolumeSourceTypeConfigMap is the ConfigMap build source volume type
+ BuildVolumeSourceTypeConfigMap BuildVolumeSourceType = "ConfigMap"
+
+ // BuildVolumeSourceTypeCSI is the CSI build source volume type
+ BuildVolumeSourceTypeCSI BuildVolumeSourceType = "CSI"
+)
+
+// BuildVolumeSource represents the source of a volume to mount.
+// Only one of its supported types may be specified at any given time.
+type BuildVolumeSource struct {
+
+ // type is the BuildVolumeSourceType for the volume source.
+ // Type must match the populated volume source.
+ // Valid types are: Secret, ConfigMap, CSI
+ Type BuildVolumeSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildVolumeSourceType"`
+
+ // secret represents a Secret that should populate this volume.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ // +optional
+ Secret *corev1.SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+
+ // configMap represents a ConfigMap that should populate this volume.
+ // +optional
+ ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
+
+ // csi represents ephemeral storage provided by external CSI drivers which support this capability.
+ // +optional
+ CSI *corev1.CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,4,opt,name=csi"`
+}
+
+// BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment.
+type BuildVolumeMount struct {
+ // destinationPath is the path within the buildah runtime environment at which the volume should be mounted.
+ // The transient mount within the build image and the backing volume will both be mounted read only.
+ // Must be an absolute path, must not contain '..' or ':', and must not collide with a destination path generated
+ // by the builder process.
+ // Paths that collide with those added by the build controller will result in a
+ // failed build with an error message detailing which path caused the error.
+ DestinationPath string `json:"destinationPath" protobuf:"bytes,1,opt,name=destinationPath"`
+}
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..03cb4a6a7
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
@@ -0,0 +1,1610 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by codegen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
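+//
+// Callers typically use the exported DeepCopy or DeepCopyObject wrappers below
+// rather than calling DeepCopyInto directly; a sketch (variable name illustrative):
+//
+//	copied := opts.DeepCopy() // opts is a *BinaryBuildRequestOptions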
+func (in *BinaryBuildRequestOptions) DeepCopyInto(out *BinaryBuildRequestOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildRequestOptions. +func (in *BinaryBuildRequestOptions) DeepCopy() *BinaryBuildRequestOptions { + if in == nil { + return nil + } + out := new(BinaryBuildRequestOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BinaryBuildRequestOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BinaryBuildSource) DeepCopyInto(out *BinaryBuildSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildSource. +func (in *BinaryBuildSource) DeepCopy() *BinaryBuildSource { + if in == nil { + return nil + } + out := new(BinaryBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BitbucketWebHookCause) DeepCopyInto(out *BitbucketWebHookCause) { + *out = *in + in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketWebHookCause. +func (in *BitbucketWebHookCause) DeepCopy() *BitbucketWebHookCause { + if in == nil { + return nil + } + out := new(BitbucketWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Build) DeepCopyInto(out *Build) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build. +func (in *Build) DeepCopy() *Build { + if in == nil { + return nil + } + out := new(Build) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Build) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildCondition) DeepCopyInto(out *BuildCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildCondition. +func (in *BuildCondition) DeepCopy() *BuildCondition { + if in == nil { + return nil + } + out := new(BuildCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildConfig) DeepCopyInto(out *BuildConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfig. +func (in *BuildConfig) DeepCopy() *BuildConfig { + if in == nil { + return nil + } + out := new(BuildConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigList) DeepCopyInto(out *BuildConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BuildConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigList. +func (in *BuildConfigList) DeepCopy() *BuildConfigList { + if in == nil { + return nil + } + out := new(BuildConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigSpec) DeepCopyInto(out *BuildConfigSpec) { + *out = *in + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]BuildTriggerPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.CommonSpec.DeepCopyInto(&out.CommonSpec) + if in.SuccessfulBuildsHistoryLimit != nil { + in, out := &in.SuccessfulBuildsHistoryLimit, &out.SuccessfulBuildsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedBuildsHistoryLimit != nil { + in, out := &in.FailedBuildsHistoryLimit, &out.FailedBuildsHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigSpec. +func (in *BuildConfigSpec) DeepCopy() *BuildConfigSpec { + if in == nil { + return nil + } + out := new(BuildConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigStatus) DeepCopyInto(out *BuildConfigStatus) { + *out = *in + if in.ImageChangeTriggers != nil { + in, out := &in.ImageChangeTriggers, &out.ImageChangeTriggers + *out = make([]ImageChangeTriggerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigStatus. +func (in *BuildConfigStatus) DeepCopy() *BuildConfigStatus { + if in == nil { + return nil + } + out := new(BuildConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildList) DeepCopyInto(out *BuildList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Build, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList. +func (in *BuildList) DeepCopy() *BuildList { + if in == nil { + return nil + } + out := new(BuildList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildLog) DeepCopyInto(out *BuildLog) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLog. +func (in *BuildLog) DeepCopy() *BuildLog { + if in == nil { + return nil + } + out := new(BuildLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildLog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildLogOptions) DeepCopyInto(out *BuildLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLogOptions. +func (in *BuildLogOptions) DeepCopy() *BuildLogOptions { + if in == nil { + return nil + } + out := new(BuildLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildOutput) DeepCopyInto(out *BuildOutput) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(corev1.ObjectReference) + **out = **in + } + if in.PushSecret != nil { + in, out := &in.PushSecret, &out.PushSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOutput. 
+func (in *BuildOutput) DeepCopy() *BuildOutput { + if in == nil { + return nil + } + out := new(BuildOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildPostCommitSpec) DeepCopyInto(out *BuildPostCommitSpec) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildPostCommitSpec. +func (in *BuildPostCommitSpec) DeepCopy() *BuildPostCommitSpec { + if in == nil { + return nil + } + out := new(BuildPostCommitSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildRequest) DeepCopyInto(out *BuildRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + if in.TriggeredByImage != nil { + in, out := &in.TriggeredByImage, &out.TriggeredByImage + *out = new(corev1.ObjectReference) + **out = **in + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + **out = **in + } + if in.LastVersion != nil { + in, out := &in.LastVersion, &out.LastVersion + *out = new(int64) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]BuildTriggerCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerStrategyOptions != nil { + in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions + *out = new(DockerStrategyOptions) + (*in).DeepCopyInto(*out) + } + if in.SourceStrategyOptions != nil { + in, out := &in.SourceStrategyOptions, &out.SourceStrategyOptions + *out = new(SourceStrategyOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildRequest. +func (in *BuildRequest) DeepCopy() *BuildRequest { + if in == nil { + return nil + } + out := new(BuildRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildSource) DeepCopyInto(out *BuildSource) { + *out = *in + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + **out = **in + } + if in.Dockerfile != nil { + in, out := &in.Dockerfile, &out.Dockerfile + *out = new(string) + **out = **in + } + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitBuildSource) + (*in).DeepCopyInto(*out) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceSecret != nil { + in, out := &in.SourceSecret, &out.SourceSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretBuildSource, len(*in)) + copy(*out, *in) + } + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]ConfigMapBuildSource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSource. +func (in *BuildSource) DeepCopy() *BuildSource { + if in == nil { + return nil + } + out := new(BuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { + *out = *in + in.CommonSpec.DeepCopyInto(&out.CommonSpec) + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]BuildTriggerCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. +func (in *BuildSpec) DeepCopy() *BuildSpec { + if in == nil { + return nil + } + out := new(BuildSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatus) DeepCopyInto(out *BuildStatus) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(corev1.ObjectReference) + **out = **in + } + in.Output.DeepCopyInto(&out.Output) + if in.Stages != nil { + in, out := &in.Stages, &out.Stages + *out = make([]StageInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]BuildCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatus. +func (in *BuildStatus) DeepCopy() *BuildStatus { + if in == nil { + return nil + } + out := new(BuildStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatusOutput) DeepCopyInto(out *BuildStatusOutput) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(BuildStatusOutputTo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutput. 
+func (in *BuildStatusOutput) DeepCopy() *BuildStatusOutput { + if in == nil { + return nil + } + out := new(BuildStatusOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatusOutputTo) DeepCopyInto(out *BuildStatusOutputTo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutputTo. +func (in *BuildStatusOutputTo) DeepCopy() *BuildStatusOutputTo { + if in == nil { + return nil + } + out := new(BuildStatusOutputTo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStrategy) DeepCopyInto(out *BuildStrategy) { + *out = *in + if in.DockerStrategy != nil { + in, out := &in.DockerStrategy, &out.DockerStrategy + *out = new(DockerBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.SourceStrategy != nil { + in, out := &in.SourceStrategy, &out.SourceStrategy + *out = new(SourceBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.CustomStrategy != nil { + in, out := &in.CustomStrategy, &out.CustomStrategy + *out = new(CustomBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.JenkinsPipelineStrategy != nil { + in, out := &in.JenkinsPipelineStrategy, &out.JenkinsPipelineStrategy + *out = new(JenkinsPipelineBuildStrategy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStrategy. +func (in *BuildStrategy) DeepCopy() *BuildStrategy { + if in == nil { + return nil + } + out := new(BuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildTriggerCause) DeepCopyInto(out *BuildTriggerCause) { + *out = *in + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(GenericWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(GitHubWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.ImageChangeBuild != nil { + in, out := &in.ImageChangeBuild, &out.ImageChangeBuild + *out = new(ImageChangeCause) + (*in).DeepCopyInto(*out) + } + if in.GitLabWebHook != nil { + in, out := &in.GitLabWebHook, &out.GitLabWebHook + *out = new(GitLabWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.BitbucketWebHook != nil { + in, out := &in.BitbucketWebHook, &out.BitbucketWebHook + *out = new(BitbucketWebHookCause) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerCause. +func (in *BuildTriggerCause) DeepCopy() *BuildTriggerCause { + if in == nil { + return nil + } + out := new(BuildTriggerCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildTriggerPolicy) DeepCopyInto(out *BuildTriggerPolicy) { + *out = *in + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.ImageChange != nil { + in, out := &in.ImageChange, &out.ImageChange + *out = new(ImageChangeTrigger) + (*in).DeepCopyInto(*out) + } + if in.GitLabWebHook != nil { + in, out := &in.GitLabWebHook, &out.GitLabWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.BitbucketWebHook != nil { + in, out := &in.BitbucketWebHook, &out.BitbucketWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerPolicy. +func (in *BuildTriggerPolicy) DeepCopy() *BuildTriggerPolicy { + if in == nil { + return nil + } + out := new(BuildTriggerPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildVolume) DeepCopyInto(out *BuildVolume) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]BuildVolumeMount, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolume. +func (in *BuildVolume) DeepCopy() *BuildVolume { + if in == nil { + return nil + } + out := new(BuildVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildVolumeMount) DeepCopyInto(out *BuildVolumeMount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolumeMount. +func (in *BuildVolumeMount) DeepCopy() *BuildVolumeMount { + if in == nil { + return nil + } + out := new(BuildVolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildVolumeSource) DeepCopyInto(out *BuildVolumeSource) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(corev1.CSIVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolumeSource. +func (in *BuildVolumeSource) DeepCopy() *BuildVolumeSource { + if in == nil { + return nil + } + out := new(BuildVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommonSpec) DeepCopyInto(out *CommonSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + in.Strategy.DeepCopyInto(&out.Strategy) + in.Output.DeepCopyInto(&out.Output) + in.Resources.DeepCopyInto(&out.Resources) + in.PostCommit.DeepCopyInto(&out.PostCommit) + if in.CompletionDeadlineSeconds != nil { + in, out := &in.CompletionDeadlineSeconds, &out.CompletionDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(OptionalNodeSelector, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MountTrustedCA != nil { + in, out := &in.MountTrustedCA, &out.MountTrustedCA + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonSpec. +func (in *CommonSpec) DeepCopy() *CommonSpec { + if in == nil { + return nil + } + out := new(CommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonWebHookCause) DeepCopyInto(out *CommonWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonWebHookCause. +func (in *CommonWebHookCause) DeepCopy() *CommonWebHookCause { + if in == nil { + return nil + } + out := new(CommonWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapBuildSource) DeepCopyInto(out *ConfigMapBuildSource) { + *out = *in + out.ConfigMap = in.ConfigMap + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapBuildSource. +func (in *ConfigMapBuildSource) DeepCopy() *ConfigMapBuildSource { + if in == nil { + return nil + } + out := new(ConfigMapBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomBuildStrategy) DeepCopyInto(out *CustomBuildStrategy) { + *out = *in + out.From = in.From + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBuildStrategy. +func (in *CustomBuildStrategy) DeepCopy() *CustomBuildStrategy { + if in == nil { + return nil + } + out := new(CustomBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerBuildStrategy) DeepCopyInto(out *DockerBuildStrategy) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BuildArgs != nil { + in, out := &in.BuildArgs, &out.BuildArgs + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImageOptimizationPolicy != nil { + in, out := &in.ImageOptimizationPolicy, &out.ImageOptimizationPolicy + *out = new(ImageOptimizationPolicy) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]BuildVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerBuildStrategy. +func (in *DockerBuildStrategy) DeepCopy() *DockerBuildStrategy { + if in == nil { + return nil + } + out := new(DockerBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerStrategyOptions) DeepCopyInto(out *DockerStrategyOptions) { + *out = *in + if in.BuildArgs != nil { + in, out := &in.BuildArgs, &out.BuildArgs + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NoCache != nil { + in, out := &in.NoCache, &out.NoCache + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerStrategyOptions. +func (in *DockerStrategyOptions) DeepCopy() *DockerStrategyOptions { + if in == nil { + return nil + } + out := new(DockerStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericWebHookCause) DeepCopyInto(out *GenericWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookCause. +func (in *GenericWebHookCause) DeepCopy() *GenericWebHookCause { + if in == nil { + return nil + } + out := new(GenericWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericWebHookEvent) DeepCopyInto(out *GenericWebHookEvent) { + *out = *in + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitInfo) + (*in).DeepCopyInto(*out) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerStrategyOptions != nil { + in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions + *out = new(DockerStrategyOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookEvent. 
+func (in *GenericWebHookEvent) DeepCopy() *GenericWebHookEvent { + if in == nil { + return nil + } + out := new(GenericWebHookEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitBuildSource) DeepCopyInto(out *GitBuildSource) { + *out = *in + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitBuildSource. +func (in *GitBuildSource) DeepCopy() *GitBuildSource { + if in == nil { + return nil + } + out := new(GitBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubWebHookCause) DeepCopyInto(out *GitHubWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubWebHookCause. +func (in *GitHubWebHookCause) DeepCopy() *GitHubWebHookCause { + if in == nil { + return nil + } + out := new(GitHubWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitInfo) DeepCopyInto(out *GitInfo) { + *out = *in + in.GitBuildSource.DeepCopyInto(&out.GitBuildSource) + out.GitSourceRevision = in.GitSourceRevision + if in.Refs != nil { + in, out := &in.Refs, &out.Refs + *out = make([]GitRefInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitInfo. +func (in *GitInfo) DeepCopy() *GitInfo { + if in == nil { + return nil + } + out := new(GitInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabWebHookCause) DeepCopyInto(out *GitLabWebHookCause) { + *out = *in + in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabWebHookCause. +func (in *GitLabWebHookCause) DeepCopy() *GitLabWebHookCause { + if in == nil { + return nil + } + out := new(GitLabWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRefInfo) DeepCopyInto(out *GitRefInfo) { + *out = *in + in.GitBuildSource.DeepCopyInto(&out.GitBuildSource) + out.GitSourceRevision = in.GitSourceRevision + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRefInfo. +func (in *GitRefInfo) DeepCopy() *GitRefInfo { + if in == nil { + return nil + } + out := new(GitRefInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitSourceRevision) DeepCopyInto(out *GitSourceRevision) { + *out = *in + out.Author = in.Author + out.Committer = in.Committer + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitSourceRevision. 
+func (in *GitSourceRevision) DeepCopy() *GitSourceRevision { + if in == nil { + return nil + } + out := new(GitSourceRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageChangeCause) DeepCopyInto(out *ImageChangeCause) { + *out = *in + if in.FromRef != nil { + in, out := &in.FromRef, &out.FromRef + *out = new(corev1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeCause. +func (in *ImageChangeCause) DeepCopy() *ImageChangeCause { + if in == nil { + return nil + } + out := new(ImageChangeCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageChangeTrigger) DeepCopyInto(out *ImageChangeTrigger) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeTrigger. +func (in *ImageChangeTrigger) DeepCopy() *ImageChangeTrigger { + if in == nil { + return nil + } + out := new(ImageChangeTrigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageChangeTriggerStatus) DeepCopyInto(out *ImageChangeTriggerStatus) { + *out = *in + out.From = in.From + in.LastTriggerTime.DeepCopyInto(&out.LastTriggerTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeTriggerStatus. +func (in *ImageChangeTriggerStatus) DeepCopy() *ImageChangeTriggerStatus { + if in == nil { + return nil + } + out := new(ImageChangeTriggerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel. +func (in *ImageLabel) DeepCopy() *ImageLabel { + if in == nil { + return nil + } + out := new(ImageLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSource) DeepCopyInto(out *ImageSource) { + *out = *in + out.From = in.From + if in.As != nil { + in, out := &in.As, &out.As + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]ImageSourcePath, len(*in)) + copy(*out, *in) + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSource. +func (in *ImageSource) DeepCopy() *ImageSource { + if in == nil { + return nil + } + out := new(ImageSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageSourcePath) DeepCopyInto(out *ImageSourcePath) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSourcePath. +func (in *ImageSourcePath) DeepCopy() *ImageSourcePath { + if in == nil { + return nil + } + out := new(ImageSourcePath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamTagReference) DeepCopyInto(out *ImageStreamTagReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTagReference. +func (in *ImageStreamTagReference) DeepCopy() *ImageStreamTagReference { + if in == nil { + return nil + } + out := new(ImageStreamTagReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JenkinsPipelineBuildStrategy) DeepCopyInto(out *JenkinsPipelineBuildStrategy) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JenkinsPipelineBuildStrategy. +func (in *JenkinsPipelineBuildStrategy) DeepCopy() *JenkinsPipelineBuildStrategy { + if in == nil { + return nil + } + out := new(JenkinsPipelineBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalNodeSelector) DeepCopyInto(out *OptionalNodeSelector) { + { + in := &in + *out = make(OptionalNodeSelector, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNodeSelector. +func (in OptionalNodeSelector) DeepCopy() OptionalNodeSelector { + if in == nil { + return nil + } + out := new(OptionalNodeSelector) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretBuildSource) DeepCopyInto(out *SecretBuildSource) { + *out = *in + out.Secret = in.Secret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBuildSource. 
+func (in *SecretBuildSource) DeepCopy() *SecretBuildSource { + if in == nil { + return nil + } + out := new(SecretBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretLocalReference) DeepCopyInto(out *SecretLocalReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretLocalReference. +func (in *SecretLocalReference) DeepCopy() *SecretLocalReference { + if in == nil { + return nil + } + out := new(SecretLocalReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretSpec) DeepCopyInto(out *SecretSpec) { + *out = *in + out.SecretSource = in.SecretSource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSpec. +func (in *SecretSpec) DeepCopy() *SecretSpec { + if in == nil { + return nil + } + out := new(SecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceBuildStrategy) DeepCopyInto(out *SourceBuildStrategy) { + *out = *in + out.From = in.From + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]BuildVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceBuildStrategy. +func (in *SourceBuildStrategy) DeepCopy() *SourceBuildStrategy { + if in == nil { + return nil + } + out := new(SourceBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceControlUser) DeepCopyInto(out *SourceControlUser) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceControlUser. +func (in *SourceControlUser) DeepCopy() *SourceControlUser { + if in == nil { + return nil + } + out := new(SourceControlUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceRevision) DeepCopyInto(out *SourceRevision) { + *out = *in + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitSourceRevision) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceRevision. +func (in *SourceRevision) DeepCopy() *SourceRevision { + if in == nil { + return nil + } + out := new(SourceRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceStrategyOptions) DeepCopyInto(out *SourceStrategyOptions) { + *out = *in + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStrategyOptions. +func (in *SourceStrategyOptions) DeepCopy() *SourceStrategyOptions { + if in == nil { + return nil + } + out := new(SourceStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageInfo) DeepCopyInto(out *StageInfo) { + *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]StepInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageInfo. +func (in *StageInfo) DeepCopy() *StageInfo { + if in == nil { + return nil + } + out := new(StageInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepInfo) DeepCopyInto(out *StepInfo) { + *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepInfo. +func (in *StepInfo) DeepCopy() *StepInfo { + if in == nil { + return nil + } + out := new(StepInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebHookTrigger) DeepCopyInto(out *WebHookTrigger) { + *out = *in + if in.SecretReference != nil { + in, out := &in.SecretReference, &out.SecretReference + *out = new(SecretLocalReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebHookTrigger. +func (in *WebHookTrigger) DeepCopy() *WebHookTrigger { + if in == nil { + return nil + } + out := new(WebHookTrigger) + in.DeepCopyInto(out) + return out +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..1da784353 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,692 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_BinaryBuildRequestOptions = map[string]string{ + "": "BinaryBuildRequestOptions are the options required to fully specify a binary build request\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "asFile": "asFile determines if the binary should be created as a file within the source rather than extracted as an archive", + "revision.commit": "revision.commit is the value identifying a specific commit", + "revision.message": "revision.message is the description of a specific commit", + "revision.authorName": "revision.authorName of the source control user", + "revision.authorEmail": "revision.authorEmail of the source control user", + "revision.committerName": "revision.committerName of the source control user", + "revision.committerEmail": "revision.committerEmail of the source control user", +} + +func (BinaryBuildRequestOptions) SwaggerDoc() map[string]string { + return map_BinaryBuildRequestOptions +} + +var map_BinaryBuildSource = map[string]string{ + "": "BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, where the file will be extracted and used as the build source.", + "asFile": "asFile indicates that the provided binary input should be considered a single file within the build input. For example, specifying \"webapp.war\" would place the provided binary as `/webapp.war` for the builder. If left empty, the Docker and Source build strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. The custom strategy receives this binary as standard input. This filename may not contain slashes or be '..' or '.'.", +} + +func (BinaryBuildSource) SwaggerDoc() map[string]string { + return map_BinaryBuildSource +} + +var map_BitbucketWebHookCause = map[string]string{ + "": "BitbucketWebHookCause has information about a Bitbucket webhook that triggered a build.", +} + +func (BitbucketWebHookCause) SwaggerDoc() map[string]string { + return map_BitbucketWebHookCause +} + +var map_Build = map[string]string{ + "": "Build encapsulates the inputs needed to produce a new deployable image, as well as the status of the execution and a reference to the Pod which executed the build.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is all the inputs used to execute the build.", + "status": "status is the current status of the build.", +} + +func (Build) SwaggerDoc() map[string]string { + return map_Build +} + +var map_BuildCondition = map[string]string{ + "": "BuildCondition describes the state of a build at a certain point.", + "type": "type of build condition.", + "status": "status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (BuildCondition) SwaggerDoc() map[string]string { + return map_BuildCondition +} + +var map_BuildConfig = map[string]string{ + "": "Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the \"output\" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be created.\n\nEach build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have \"output\" set can be used to test code or run a verification build.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds all the input necessary to produce a new build, and the conditions when to trigger them.", + "status": "status holds any relevant information about a build config", +} + +func (BuildConfig) SwaggerDoc() map[string]string { + return map_BuildConfig +} + +var map_BuildConfigList = map[string]string{ + "": "BuildConfigList is a collection of BuildConfigs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of build configs", +} + +func (BuildConfigList) SwaggerDoc() map[string]string { + return map_BuildConfigList +} + +var map_BuildConfigSpec = map[string]string{ + "": "BuildConfigSpec describes when and how builds are created", + "triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.", + "runPolicy": "runPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional; if not specified, we default to \"Serial\".", + "successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. If removed after the BuildConfig has been created, all successful builds are retained.", + "failedBuildsHistoryLimit": "failedBuildsHistoryLimit is the number of old failed builds to retain. When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. If removed after the BuildConfig has been created, all failed builds are retained.", +} + +func (BuildConfigSpec) SwaggerDoc() map[string]string { + return map_BuildConfigSpec +} + +var map_BuildConfigStatus = map[string]string{ + "": "BuildConfigStatus contains current state of the build config object.", + "lastVersion": "lastVersion is used to inform about the number of the last triggered build.", + "imageChangeTriggers": "imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.", +} + +func (BuildConfigStatus) SwaggerDoc() map[string]string { + return map_BuildConfigStatus +} + +var map_BuildList = map[string]string{ + "": "BuildList is a collection of Builds.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of builds", +} + +func (BuildList) SwaggerDoc() map[string]string { + return map_BuildList +} + +var map_BuildLog = map[string]string{ + "": "BuildLog is the (unused) resource associated with the build log redirector\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (BuildLog) SwaggerDoc() map[string]string { + return map_BuildLog +} + +var map_BuildLogOptions = map[string]string{ + "": "BuildLogOptions is the REST options for a build log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "container": "container for which to stream logs. Defaults to only container if there is one container in the pod.", + "follow": "follow if true indicates that the build log should be streamed until the build terminates.", + "previous": "previous returns previous build logs. Defaults to false.", + "sinceSeconds": "sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "sinceTime is an RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+ "timestamps": "timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "tailLines, If set, is the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "nowait": "nowait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", + "version": "version of the build for which to view logs.", + "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).", +} + +func (BuildLogOptions) SwaggerDoc() map[string]string { + return map_BuildLogOptions +} + +var map_BuildOutput = map[string]string{ + "": "BuildOutput is input to a build strategy and describes the container image that the strategy should produce.", + "to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a container image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.", + "pushSecret": "pushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", + "imageLabels": "imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used.", +} + +func (BuildOutput) SwaggerDoc() map[string]string { + return map_BuildOutput +} + +var map_BuildPostCommitSpec = map[string]string{ + "": "A BuildPostCommitSpec holds a build post commit hook specification. The hook executes a command in a temporary container running the build output image, immediately after the last layer of the image is committed and before the image is pushed to a registry. The command is executed with the current working directory ($PWD) set to the image's WORKDIR.\n\nThe build will be marked as failed if the hook execution fails. It will fail if the script or command returns a non-zero exit code, or if there is any other error related to starting the temporary container.\n\nThere are five different ways to configure the hook. As an example, all forms below are equivalent and will execute `rake test --verbose`.\n\n1. Shell script:\n\n\t \"postCommit\": {\n\t \"script\": \"rake test --verbose\",\n\t }\n\n\tThe above is a convenient form which is equivalent to:\n\n\t \"postCommit\": {\n\t \"command\": [\"/bin/sh\", \"-ic\"],\n\t \"args\": [\"rake test --verbose\"]\n\t }\n\n2. A command as the image entrypoint:\n\n\t \"postCommit\": {\n\t \"command\": [\"rake\", \"test\", \"--verbose\"]\n\t }\n\n\tCommand overrides the image entrypoint in the exec form, as documented in\n\tDocker: https://docs.docker.com/engine/reference/builder/#entrypoint.\n\n3. Pass arguments to the default entrypoint:\n\n\t \"postCommit\": {\n\t\t\t \"args\": [\"rake\", \"test\", \"--verbose\"]\n\t\t }\n\n\t This form is only useful if the image entrypoint can handle arguments.\n\n4. Shell script with arguments:\n\n\t \"postCommit\": {\n\t \"script\": \"rake test $1\",\n\t \"args\": [\"--verbose\"]\n\t }\n\n\tThis form is useful if you need to pass arguments that would otherwise be\n\thard to quote properly in the shell script. In the script, $0 will be\n\t\"/bin/sh\" and $1, $2, etc, are the positional arguments from Args.\n\n5. Command with arguments:\n\n\t \"postCommit\": {\n\t \"command\": [\"rake\", \"test\"],\n\t \"args\": [\"--verbose\"]\n\t }\n\n\tThis form is equivalent to appending the arguments to the Command slice.\n\nIt is invalid to provide both Script and Command simultaneously. If none of the fields are specified, the hook is not executed.", + "command": "command is the command to run. It may not be specified with Script. This might be needed if the image doesn't have `/bin/sh`, or if you do not want to use a shell. In all other cases, using Script might be more convenient.", + "args": "args is a list of arguments that are provided to either Command, Script or the container image's default entrypoint. The arguments are placed immediately after the command to be run.", + "script": "script is a shell script to be run with `/bin/sh -ic`. It may not be specified with Command. Use Script when a shell script is appropriate to execute the post build hook, for example for running unit tests with `rake test`. If you need control over the image entrypoint, or if the image does not have `/bin/sh`, use Command and/or Args. The `-i` flag is needed to support CentOS and RHEL images that use Software Collections (SCL), in order to have the appropriate collections enabled in the shell. E.g., in the Ruby image, this is necessary to make `ruby`, `bundle` and other binaries available in the PATH.", +} + +func (BuildPostCommitSpec) SwaggerDoc() map[string]string { + return map_BuildPostCommitSpec +} + +var map_BuildRequest = map[string]string{ + "": "BuildRequest is the resource used to pass parameters to build generator\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "revision": "revision is the information from the source for a specific repo snapshot.", + "triggeredByImage": "triggeredByImage is the Image that triggered this build.", + "from": "from is the reference to the ImageStreamTag that triggered the build.", + "binary": "binary indicates a request to build from a binary provided to the builder", + "lastVersion": "lastVersion (optional) is the LastVersion of the BuildConfig that was used to generate the build. If the BuildConfig in the generator doesn't match, a build will not be generated.", + "env": "env contains additional environment variables you want to pass into a builder container.", + "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", + "dockerStrategyOptions": "dockerStrategyOptions contains additional docker-strategy specific options for the build", + "sourceStrategyOptions": "sourceStrategyOptions contains additional source-strategy specific options for the build", +} + +func (BuildRequest) SwaggerDoc() map[string]string { + return map_BuildRequest +} + +var map_BuildSource = map[string]string{ + "": "BuildSource is the SCM used for the build.", + "type": "type of build input to accept", + "binary": "binary builds accept a binary as their input. The binary is generally assumed to be a tar, gzipped tar, or zip file depending on the strategy. For container image builds, this is the build context and an optional Dockerfile may be specified to override any Dockerfile in the build context. For Source builds, this is assumed to be an archive as described above. For Source and container image builds, if binary.asFile is set the build will receive a directory with a single file. contextDir may be used when an archive is provided. Custom builds will receive this binary as input on STDIN.", + "dockerfile": "dockerfile is the raw contents of a Dockerfile which should be built. When this option is specified, the FROM may be modified based on your strategy base image and additional ENV stanzas from your strategy environment will be added after the FROM, but before the rest of your Dockerfile stanzas. The Dockerfile source type may be used with other options like git - in those cases the Git repo will have any innate Dockerfile replaced in the context dir.", + "git": "git contains optional information about git build source", + "images": "images describes a set of images to be used to provide source for the build", + "contextDir": "contextDir specifies the sub-directory where the source code for the application exists. This allows having buildable sources in a directory other than the root of the repository.", + "sourceSecret": "sourceSecret is the name of a Secret that would be used for setting up the authentication for cloning a private repository. The secret contains valid credentials for the remote repository, where the data's key represents the authentication method to be used and the value is the base64 encoded credentials. Supported auth methods are: ssh-privatekey.", + "secrets": "secrets represents a list of secrets and their destinations that will be used only for the build.", + "configMaps": "configMaps represents a list of configMaps and their destinations that will be used for the build.", +} + +func (BuildSource) SwaggerDoc() map[string]string { + return map_BuildSource +} + +var map_BuildSpec = map[string]string{ + "": "BuildSpec has the information to represent a build and also additional information about a build", + "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", +} + +func (BuildSpec) SwaggerDoc() map[string]string { + return map_BuildSpec +} + +var map_BuildStatus = map[string]string{ + "": "BuildStatus contains the status of a build", + "phase": "phase is the point in the build lifecycle. Possible values are \"New\", \"Pending\", \"Running\", \"Complete\", \"Failed\", \"Error\", and \"Cancelled\".", + "cancelled": "cancelled describes if a cancel event was triggered for the build.", + "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + "message": "message is a human-readable message indicating details about why the build has this status.", + "startTimestamp": "startTimestamp is a timestamp representing the server time when this Build started running in a Pod. It is represented in RFC3339 form and is in UTC.", + "completionTimestamp": "completionTimestamp is a timestamp representing the server time when this Build was finished, whether that build failed or succeeded. It reflects the time at which the Pod running the Build terminated. It is represented in RFC3339 form and is in UTC.", + "duration": "duration contains a time.Duration object describing build time.", + "outputDockerImageReference": "outputDockerImageReference contains a reference to the container image that will be built by this build. Its value is computed from Build.Spec.Output.To, and should include the registry address, so that it can be used to push and pull the image.", + "config": "config is an ObjectReference to the BuildConfig this Build is based on.", + "output": "output describes the container image the build has produced.", + "stages": "stages contains details about each stage that occurs during the build, including start time, duration (in milliseconds), and the steps that occurred within each stage.", + "logSnippet": "logSnippet is the last few lines of the build log. This value is only set for builds that failed.", + "conditions": "conditions represents the latest available observations of a build's current state.", +} + +func (BuildStatus) SwaggerDoc() map[string]string { + return map_BuildStatus +} + +var map_BuildStatusOutput = map[string]string{ + "": "BuildStatusOutput contains the status of the built image.", + "to": "to describes the status of the built image being pushed to a registry.", +} + +func (BuildStatusOutput) SwaggerDoc() map[string]string { + return map_BuildStatusOutput +} + +var map_BuildStatusOutputTo = map[string]string{ + "": "BuildStatusOutputTo describes the status of the built image with regards to the image registry to which it was supposed to be pushed.", + "imageDigest": "imageDigest is the digest of the built container image. The digest uniquely identifies the image in the registry to which it was pushed.\n\nPlease note that this field may not always be set even if the push completes successfully - e.g. when the registry returns no digest or returns it in a format that the builder doesn't understand.", +} + +func (BuildStatusOutputTo) SwaggerDoc() map[string]string { + return map_BuildStatusOutputTo +} + +var map_BuildStrategy = map[string]string{ + "": "BuildStrategy contains the details of how to perform a build.", + "type": "type is the kind of build strategy.", + "dockerStrategy": "dockerStrategy holds the parameters to the container image build strategy.", + "sourceStrategy": "sourceStrategy holds the parameters to the Source build strategy.", + "customStrategy": "customStrategy holds the parameters to the Custom build strategy", + "jenkinsPipelineStrategy": "jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines",
Deprecated: use OpenShift Pipelines", +} + +func (BuildStrategy) SwaggerDoc() map[string]string { + return map_BuildStrategy +} + +var map_BuildTriggerCause = map[string]string{ + "": "BuildTriggerCause holds information about a triggered build. It is used for displaying build trigger data for each build and build configuration in oc describe. It is also used to describe which triggers led to the most recent update in the build configuration.", + "message": "message is used to store a human readable message for why the build was triggered. E.g.: \"Manually triggered by user\", \"Configuration change\",etc.", + "genericWebHook": "genericWebHook holds data about a builds generic webhook trigger.", + "githubWebHook": "githubWebHook represents data for a GitHub webhook that fired a specific build.", + "imageChangeBuild": "imageChangeBuild stores information about an imagechange event that triggered a new build.", + "gitlabWebHook": "gitlabWebHook represents data for a GitLab webhook that fired a specific build.", + "bitbucketWebHook": "bitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.", +} + +func (BuildTriggerCause) SwaggerDoc() map[string]string { + return map_BuildTriggerCause +} + +var map_BuildTriggerPolicy = map[string]string{ + "": "BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.", + "type": "type is the type of build trigger. Valid values:\n\n- GitHub GitHubWebHookBuildTriggerType represents a trigger that launches builds on GitHub webhook invocations\n\n- Generic GenericWebHookBuildTriggerType represents a trigger that launches builds on generic webhook invocations\n\n- GitLab GitLabWebHookBuildTriggerType represents a trigger that launches builds on GitLab webhook invocations\n\n- Bitbucket BitbucketWebHookBuildTriggerType represents a trigger that launches builds on Bitbucket webhook invocations\n\n- ImageChange ImageChangeBuildTriggerType represents a trigger that launches builds on availability of a new version of an image\n\n- ConfigChange ConfigChangeBuildTriggerType will trigger a build on an initial build config creation WARNING: In the future the behavior will change to trigger a build on any config change", + "github": "github contains the parameters for a GitHub webhook type of trigger", + "generic": "generic contains the parameters for a Generic webhook type of trigger", + "imageChange": "imageChange contains parameters for an ImageChange type of trigger", + "gitlab": "GitLabWebHook contains the parameters for a GitLab webhook type of trigger", + "bitbucket": "BitbucketWebHook contains the parameters for a Bitbucket webhook type of trigger", +} + +func (BuildTriggerPolicy) SwaggerDoc() map[string]string { + return map_BuildTriggerPolicy +} + +var map_BuildVolume = map[string]string{ + "": "BuildVolume describes a volume that is made available to build pods, such that it can be mounted into buildah's runtime environment. Only a subset of Kubernetes Volume sources are supported.", + "name": "name is a unique identifier for this BuildVolume. It must conform to the Kubernetes DNS label standard and be unique within the pod. Names that collide with those added by the build controller will result in a failed build with an error message detailing which name caused the error. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "source": "source represents the location and type of the mounted volume.", + "mounts": "mounts represents the location of the volume in the image build container", +} + +func (BuildVolume) SwaggerDoc() map[string]string { + return map_BuildVolume +} + +var map_BuildVolumeMount = map[string]string{ + "": "BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment.", + "destinationPath": "destinationPath is the path within the buildah runtime environment at which the volume should be mounted. The transient mount within the build image and the backing volume will both be mounted read only. Must be an absolute path, must not contain '..' or ':', and must not collide with a destination path generated by the builder process. Paths that collide with those added by the build controller will result in a failed build with an error message detailing which path caused the error.", +} + +func (BuildVolumeMount) SwaggerDoc() map[string]string { + return map_BuildVolumeMount +} + +var map_BuildVolumeSource = map[string]string{ + "": "BuildVolumeSource represents the source of a volume to mount. Only one of its supported types may be specified at any given time.", + "type": "type is the BuildVolumeSourceType for the volume source. Type must match the populated volume source. Valid types are: Secret, ConfigMap", + "secret": "secret represents a Secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "configMap": "configMap represents a ConfigMap that should populate this volume", + "csi": "csi represents ephemeral storage provided by external CSI drivers which support this capability", +} + +func (BuildVolumeSource) SwaggerDoc() map[string]string { + return map_BuildVolumeSource +} + +var map_CommonSpec = map[string]string{ + "": "CommonSpec encapsulates all the inputs necessary to represent a build.", + "serviceAccount": "serviceAccount is the name of the ServiceAccount to use to run the pod created by this build. The pod will be allowed to use secrets referenced by the ServiceAccount", + "source": "source describes the SCM in use.", + "revision": "revision is the information from the source for a specific repo snapshot. This is optional.", + "strategy": "strategy defines how to perform a build.", + "output": "output describes the container image the Strategy should produce.", + "resources": "resources computes resource requirements to execute the build.", + "postCommit": "postCommit is a build hook executed after the build output image is committed, before it is pushed to a registry.", + "completionDeadlineSeconds": "completionDeadlineSeconds is an optional duration in seconds, counted from the time when a build pod gets scheduled in the system, that the build may be active on a node before the system actively tries to terminate the build; value must be a positive integer", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node. If nil, it can be overridden by default build nodeselector values for the cluster. If set to an empty map or a map with any values, default build nodeselector values are ignored.", + "mountTrustedCA": "mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in the cluster's proxy configuration, into the build.
This lets processes within a build trust components signed by custom PKI certificate authorities, such as private artifact repositories and HTTPS proxies.\n\nWhen this field is set to true, the contents of `/etc/pki/ca-trust` within the build are managed by the build container, and any changes to this directory or its subdirectories (for example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image.", +} + +func (CommonSpec) SwaggerDoc() map[string]string { + return map_CommonSpec +} + +var map_CommonWebHookCause = map[string]string{ + "": "CommonWebHookCause factors out the identical format of these webhook causes into a struct so we can share it in the specific causes; it is too late for GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.", + "revision": "revision is the git source revision information of the trigger.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (CommonWebHookCause) SwaggerDoc() map[string]string { + return map_CommonWebHookCause +} + +var map_ConfigMapBuildSource = map[string]string{ + "": "ConfigMapBuildSource describes a configmap and its destination directory that will be used only at the build time. The content of the configmap referenced here will be copied into the destination directory instead of mounting.", + "configMap": "configMap is a reference to an existing configmap that you want to use in your build.", + "destinationDir": "destinationDir is the directory where the files from the configmap should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. For the container image build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during container image build.", +} + +func (ConfigMapBuildSource) SwaggerDoc() map[string]string { + return map_ConfigMapBuildSource +} + +var map_CustomBuildStrategy = map[string]string{ + "": "CustomBuildStrategy defines input parameters specific to a Custom build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which the container image should be pulled", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "env": "env contains additional environment variables you want to pass into a builder container.", + "exposeDockerSocket": "exposeDockerSocket will allow running Docker commands (and build container images) from inside the container.", + "forcePull": "forcePull describes if the controller should configure the build pod to always pull the images for the builder or only pull if they are not present locally", + "secrets": "secrets is a list of additional secrets that will be included in the build pod", + "buildAPIVersion": "buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder", +} + +func (CustomBuildStrategy) SwaggerDoc() map[string]string { + return map_CustomBuildStrategy +} + +var map_DockerBuildStrategy = map[string]string{ + "": "DockerBuildStrategy defines input parameters specific to a container image build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides the FROM image in the Dockerfile for the build.
If the Dockerfile uses multi-stage builds, this will replace the image in the last FROM directive of the file.", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "noCache": "noCache if set to true indicates that the container image build must be executed with the --no-cache=true flag", + "env": "env contains additional environment variables you want to pass into a builder container.", + "forcePull": "forcePull describes if the builder should pull the images from the registry prior to building.", + "dockerfilePath": "dockerfilePath is the path of the Dockerfile that will be used to build the container image, relative to the root of the context (contextDir). Defaults to `Dockerfile` if unset.", + "buildArgs": "buildArgs contains build arguments that will be resolved in the Dockerfile. See https://docs.docker.com/engine/reference/builder/#/arg for more details. NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field are ignored.", + "imageOptimizationPolicy": "imageOptimizationPolicy describes what optimizations the system can use when building images to reduce the final size or time spent building the image. The default policy is 'None' which means the final build image will be equivalent to an image created by the container image build API. The experimental policy 'SkipLayers' will avoid committing new layers in between each image step, and will fail if the Dockerfile cannot provide compatibility with the 'None' policy. An additional experimental policy 'SkipLayersAndWarn' is the same as 'SkipLayers' but simply warns if compatibility cannot be preserved.", + "volumes": "volumes is a list of input volumes that can be mounted into the build's runtime environment. Only a subset of Kubernetes Volume sources are supported by builds. More info: https://kubernetes.io/docs/concepts/storage/volumes", +} + +func (DockerBuildStrategy) SwaggerDoc() map[string]string { + return map_DockerBuildStrategy +} + +var map_DockerStrategyOptions = map[string]string{ + "": "DockerStrategyOptions contains extra strategy options for container image builds", + "buildArgs": "buildArgs contains any build arguments that are to be passed to Docker. See https://docs.docker.com/engine/reference/builder/#/arg for more details", + "noCache": "noCache overrides the docker-strategy noCache option in the build config", +} + +func (DockerStrategyOptions) SwaggerDoc() map[string]string { + return map_DockerStrategyOptions +} + +var map_GenericWebHookCause = map[string]string{ + "": "GenericWebHookCause holds information about a generic WebHook that triggered a build.", + "revision": "revision is an optional field that stores the git source revision information of the generic webhook trigger when it is available.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (GenericWebHookCause) SwaggerDoc() map[string]string { + return map_GenericWebHookCause +} + +var map_GenericWebHookEvent = map[string]string{ + "": "GenericWebHookEvent is the payload expected for a generic webhook post", + "type": "type is the type of source repository", + "git": "git is the git information if the Type is BuildSourceGit", + "env": "env contains additional environment variables you want to pass into a builder container.
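For orientation, here is a minimal sketch (not part of this diff) of the dockerStrategy fields documented above, assuming the upstream build/v1 and core/v1 types; all values are illustrative.

package example

import (
	buildv1 "github.com/openshift/api/build/v1"
	corev1 "k8s.io/api/core/v1"
)

// dockerStrategy exercises dockerfilePath, noCache, and buildArgs together.
// Only the Name and Value fields of each build arg are honored.
func dockerStrategy() buildv1.DockerBuildStrategy {
	return buildv1.DockerBuildStrategy{
		DockerfilePath: "images/app/Dockerfile", // relative to the contextDir
		NoCache:        true,                    // build with --no-cache=true
		BuildArgs: []corev1.EnvVar{
			{Name: "GIT_COMMIT", Value: "abc123"}, // illustrative value
		},
	}
}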
ValueFrom is not supported.", + "dockerStrategyOptions": "dockerStrategyOptions contains additional docker-strategy specific options for the build", +} + +func (GenericWebHookEvent) SwaggerDoc() map[string]string { + return map_GenericWebHookEvent +} + +var map_GitBuildSource = map[string]string{ + "": "GitBuildSource defines the parameters of a Git SCM", + "uri": "uri points to the source that will be built. The structure of the source will depend on the type of build to run", + "ref": "ref is the branch/tag/ref to build.", +} + +func (GitBuildSource) SwaggerDoc() map[string]string { + return map_GitBuildSource +} + +var map_GitHubWebHookCause = map[string]string{ + "": "GitHubWebHookCause has information about a GitHub webhook that triggered a build.", + "revision": "revision is the git revision information of the trigger.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (GitHubWebHookCause) SwaggerDoc() map[string]string { + return map_GitHubWebHookCause +} + +var map_GitInfo = map[string]string{ + "": "GitInfo is the aggregated git information for a generic webhook post", + "refs": "refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. This field is optional and is used when sending multiple refs", +} + +func (GitInfo) SwaggerDoc() map[string]string { + return map_GitInfo +} + +var map_GitLabWebHookCause = map[string]string{ + "": "GitLabWebHookCause has information about a GitLab webhook that triggered a build.", +} + +func (GitLabWebHookCause) SwaggerDoc() map[string]string { + return map_GitLabWebHookCause +} + +var map_GitRefInfo = map[string]string{ + "": "GitRefInfo is a single ref", +} + +func (GitRefInfo) SwaggerDoc() map[string]string { + return map_GitRefInfo +} + +var map_GitSourceRevision = map[string]string{ + "": "GitSourceRevision is the commit information from a git source for a build", + "commit": "commit is the commit hash identifying a specific commit", + "author": "author is the author of a specific commit", + "committer": "committer is the committer of a specific commit", + "message": "message is the description of a specific commit", +} + +func (GitSourceRevision) SwaggerDoc() map[string]string { + return map_GitSourceRevision +} + +var map_ImageChangeCause = map[string]string{ + "": "ImageChangeCause contains information about the image that triggered a build", + "imageID": "imageID is the ID of the image that triggered a new build.", + "fromRef": "fromRef contains detailed information about an image that triggered a build.", +} + +func (ImageChangeCause) SwaggerDoc() map[string]string { + return map_ImageChangeCause +} + +var map_ImageChangeTrigger = map[string]string{ + "": "ImageChangeTrigger allows builds to be triggered when an ImageStream changes", + "lastTriggeredImageID": "lastTriggeredImageID is used internally by the ImageChangeController to save the last used image ID for a build. This field is deprecated and will be removed in a future release.", + "from": "from is a reference to an ImageStreamTag that will trigger a build when updated. It is optional. If no From is specified, the From image from the build strategy will be used. Only one ImageChangeTrigger with an empty From reference is allowed in a build configuration.", + "paused": "paused is true if this trigger is temporarily disabled.
Optional.", +} + +func (ImageChangeTrigger) SwaggerDoc() map[string]string { + return map_ImageChangeTrigger +} + +var map_ImageChangeTriggerStatus = map[string]string{ + "": "ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy specified in the BuildConfigSpec.Triggers struct.", + "lastTriggeredImageID": "lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started. The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started.", + "from": "from is the ImageStreamTag that is the source of the trigger.", + "lastTriggerTime": "lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start. This field is only updated when this trigger specifically started a Build.", +} + +func (ImageChangeTriggerStatus) SwaggerDoc() map[string]string { + return map_ImageChangeTriggerStatus +} + +var map_ImageLabel = map[string]string{ + "": "ImageLabel represents a label applied to the resulting image.", + "name": "name defines the name of the label. It must have non-zero length.", + "value": "value defines the literal value of the label.", +} + +func (ImageLabel) SwaggerDoc() map[string]string { + return map_ImageLabel +} + +var map_ImageSource = map[string]string{ + "": "ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the internal registry. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields).", + "from": "from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.", + "as": "A list of image names that this source will be used in place of during a multi-stage container image build. For instance, a Dockerfile that uses \"COPY --from=nginx:latest\" will first check for an image source that has \"nginx:latest\" in this field before attempting to pull directly. If the Dockerfile does not reference an image source it is ignored. This field and paths may both be set, in which case the contents will be used twice.", + "paths": "paths is a list of source and destination paths to copy from the image. This content will be copied into the build context prior to starting the build. If no paths are set, the build context will not be altered.", + "pullSecret": "pullSecret is a reference to a secret to be used to pull the image from a registry If the image is pulled from the OpenShift registry, this field does not need to be set.", +} + +func (ImageSource) SwaggerDoc() map[string]string { + return map_ImageSource +} + +var map_ImageSourcePath = map[string]string{ + "": "ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.", + "sourcePath": "sourcePath is the absolute path of the file or directory inside the image to copy to the build directory. If the source path ends in /. 
then the content of the directory will be copied, but the directory itself will not be created at the destination.", + "destinationDir": "destinationDir is the relative directory within the build directory where files copied from the image are placed.", +} + +func (ImageSourcePath) SwaggerDoc() map[string]string { + return map_ImageSourcePath +} + +var map_ImageStreamTagReference = map[string]string{ + "": "ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name.", + "namespace": "namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located", + "name": "name is the name of the ImageStreamTag for an ImageChangeTrigger", +} + +func (ImageStreamTagReference) SwaggerDoc() map[string]string { + return map_ImageStreamTagReference +} + +var map_JenkinsPipelineBuildStrategy = map[string]string{ + "": "JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. Deprecated: use OpenShift Pipelines", + "jenkinsfilePath": "jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If neither JenkinsfilePath nor Jenkinsfile is specified, this defaults to Jenkinsfile in the root of the specified contextDir.", + "jenkinsfile": "jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.", + "env": "env contains additional environment variables you want to pass into a build pipeline.", +} + +func (JenkinsPipelineBuildStrategy) SwaggerDoc() map[string]string { + return map_JenkinsPipelineBuildStrategy +} + +var map_ProxyConfig = map[string]string{ + "": "ProxyConfig defines what proxies to use for an operation", + "httpProxy": "httpProxy is a proxy used to reach the git repository over http", + "httpsProxy": "httpsProxy is a proxy used to reach the git repository over https", + "noProxy": "noProxy is the list of domains for which the proxy should not be used", +} + +func (ProxyConfig) SwaggerDoc() map[string]string { + return map_ProxyConfig +} + +var map_SecretBuildSource = map[string]string{ + "": "SecretBuildSource describes a secret and its destination directory that will be used only at the build time. The content of the secret referenced here will be copied into the destination directory instead of mounting.", + "secret": "secret is a reference to an existing secret that you want to use in your build.", + "destinationDir": "destinationDir is the directory where the files from the secret should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. Later, when the script finishes, all files injected will be truncated to zero length.
For the container image build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during container image build.", +} + +func (SecretBuildSource) SwaggerDoc() map[string]string { + return map_SecretBuildSource +} + +var map_SecretLocalReference = map[string]string{ + "": "SecretLocalReference contains information that points to the local secret being used", + "name": "name is the name of the resource in the same namespace being referenced", +} + +func (SecretLocalReference) SwaggerDoc() map[string]string { + return map_SecretLocalReference +} + +var map_SecretSpec = map[string]string{ + "": "SecretSpec specifies a secret to be included in a build pod and its corresponding mount point", + "secretSource": "secretSource is a reference to the secret", + "mountPath": "mountPath is the path at which to mount the secret", +} + +func (SecretSpec) SwaggerDoc() map[string]string { + return map_SecretSpec +} + +var map_SourceBuildStrategy = map[string]string{ + "": "SourceBuildStrategy defines input parameters specific to a Source build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which the container image should be pulled", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "env": "env contains additional environment variables you want to pass into a builder container.", + "scripts": "scripts is the location of Source scripts", + "incremental": "incremental flag forces the Source build to do incremental builds if true.", + "forcePull": "forcePull describes if the builder should pull the images from the registry prior to building.", + "volumes": "volumes is a list of input volumes that can be mounted into the build's runtime environment. Only a subset of Kubernetes Volume sources are supported by builds. More info: https://kubernetes.io/docs/concepts/storage/volumes", +} + +func (SourceBuildStrategy) SwaggerDoc() map[string]string { + return map_SourceBuildStrategy +} + +var map_SourceControlUser = map[string]string{ + "": "SourceControlUser defines the identity of a user of source control", + "name": "name of the source control user", + "email": "email of the source control user", +} + +func (SourceControlUser) SwaggerDoc() map[string]string { + return map_SourceControlUser +} + +var map_SourceRevision = map[string]string{ + "": "SourceRevision is the revision or commit information from the source for the build", + "type": "type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'", + "git": "git contains information about git-based build source", +} + +func (SourceRevision) SwaggerDoc() map[string]string { + return map_SourceRevision +} + +var map_SourceStrategyOptions = map[string]string{ + "": "SourceStrategyOptions contains extra strategy options for Source builds", + "incremental": "incremental overrides the source-strategy incremental option in the build config", +} + +func (SourceStrategyOptions) SwaggerDoc() map[string]string { + return map_SourceStrategyOptions +} + +var map_StageInfo = map[string]string{ + "": "StageInfo contains details about a build stage.", + "name": "name is a unique identifier for each build stage that occurs.", + "startTime": "startTime is a timestamp representing the server time when this Stage started.
It is represented in RFC3339 form and is in UTC.", + "durationMilliseconds": "durationMilliseconds identifies how long the stage took to complete in milliseconds. Note: the duration of a stage can exceed the sum of the duration of the steps within the stage as not all actions are accounted for in explicit build steps.", + "steps": "steps contains details about each step that occurs during a build stage, including start time and duration in milliseconds.", +} + +func (StageInfo) SwaggerDoc() map[string]string { + return map_StageInfo +} + +var map_StepInfo = map[string]string{ + "": "StepInfo contains details about a build step.", + "name": "name is a unique identifier for each build step.", + "startTime": "startTime is a timestamp representing the server time when this Step started. It is represented in RFC3339 form and is in UTC.", + "durationMilliseconds": "durationMilliseconds identifies how long the step took to complete in milliseconds.", +} + +func (StepInfo) SwaggerDoc() map[string]string { + return map_StepInfo +} + +var map_WebHookTrigger = map[string]string{ + "": "WebHookTrigger is a trigger that gets invoked using a webhook type of post", + "secret": "secret used to validate requests. Deprecated: use SecretReference instead.", + "allowEnv": "allowEnv determines whether the webhook can set environment variables; can only be set to true for GenericWebHook.", + "secretReference": "secretReference is a reference to a secret in the same namespace, containing the value to be validated when the webhook is invoked. The secret being referenced must contain a key named \"WebHookSecretKey\", the value of which will be checked against the value supplied in the webhook invocation.", +} + +func (WebHookTrigger) SwaggerDoc() map[string]string { + return map_WebHookTrigger +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/types_authentication.go b/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/types_authentication.go index 004e94723..52a41b2fe 100644 --- a/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -91,6 +91,7 @@ type AuthenticationSpec struct { // +kubebuilder:validation:MaxItems=1 // +openshift:enable:FeatureGate=ExternalOIDC // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + // +optional OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"` } @@ -253,9 +254,16 @@ type TokenIssuer struct { // The Kubernetes API server determines how authentication tokens should be handled // by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. // - // issuerURL must use the 'https' scheme. + // Must be at least 1 character and must not exceed 512 characters in length. + // Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user.
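The CEL rules that follow enforce these issuerURL constraints server-side; as a rough standard-library analogue (an illustration only, not the apiserver's implementation), they amount to:

package example

import (
	"fmt"
	"net/url"
)

// validateIssuerURL approximates the issuerURL rules: 1-512 characters, a
// parseable https URL with no query, no fragment, and no user info.
func validateIssuerURL(s string) error {
	if len(s) < 1 || len(s) > 512 {
		return fmt.Errorf("must be between 1 and 512 characters in length")
	}
	u, err := url.Parse(s)
	if err != nil {
		return fmt.Errorf("must be a valid URL: %w", err)
	}
	if u.Scheme != "https" {
		return fmt.Errorf("must use the 'https' scheme")
	}
	if u.RawQuery != "" {
		return fmt.Errorf("must not have a query")
	}
	if u.Fragment != "" {
		return fmt.Errorf("must not have a fragment")
	}
	if u.User != nil {
		return fmt.Errorf("must not have user info")
	}
	return nil
}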
// - // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getScheme() == 'https'",message="must use the 'https' scheme" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getQuery() == {}",message="must not have a query" + // +kubebuilder:validation:XValidation:rule="self.find('#(.+)$') == ''",message="must not have a fragment" + // +kubebuilder:validation:XValidation:rule="self.find('@') == ''",message="must not have user info" + // +kubebuilder:validation:MaxLength=512 + // +kubebuilder:validation:MinLength=1 // +required URL string `json:"issuerURL"` @@ -320,10 +328,10 @@ type TokenClaimMappings struct { // used to construct the extra attribute for the cluster identity. // When omitted, no extra attributes will be present on the cluster identity. // key values for extra mappings must be unique. - // A maximum of 64 extra attribute mappings may be provided. + // A maximum of 32 extra attribute mappings may be provided. // // +optional - // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:MaxItems=32 // +listType=map // +listMapKey=key // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings @@ -375,10 +383,10 @@ type TokenClaimOrExpressionMapping struct { // Precisely one of claim or expression must be set. // expression must not be specified when claim is set. // When specified, expression must be at least 1 character in length - // and must not exceed 4096 characters in length. + // and must not exceed 1024 characters in length. // // +optional - // +kubebuilder:validation:MaxLength=4096 + // +kubebuilder:validation:MaxLength=1024 // +kubebuilder:validation:MinLength=1 Expression string `json:"expression,omitempty"` } @@ -437,12 +445,12 @@ type ExtraMapping struct { // For example, the 'sub' claim value can be accessed as 'claims.sub'. // Nested claims can be accessed using dot notation ('claims.foo.bar'). // - // valueExpression must not exceed 4096 characters in length. + // valueExpression must not exceed 1024 characters in length. // valueExpression must not be empty. // // +required // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=4096 + // +kubebuilder:validation:MaxLength=1024 ValueExpression string `json:"valueExpression"` } diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 1fc06418c..a119e2c05 100644 --- a/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -532,7 +532,7 @@ type AWSPlatformStatus struct { // // +default={"dnsType": "PlatformDefault"} // +kubebuilder:default={"dnsType": "PlatformDefault"} - // +openshift:enable:FeatureGate=AWSClusterHostedDNS + // +openshift:enable:FeatureGate=AWSClusterHostedDNSInstall // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` @@ -594,6 +594,19 @@ type AzurePlatformStatus struct { // +listType=atomic // +optional ResourceTags []AzureResourceTag `json:"resourceTags,omitempty"` + + // cloudLoadBalancerConfig holds configuration related to DNS and cloud + // load balancers. 
It allows configuration of in-cluster DNS as an alternative + // to the platform default DNS implementation. + // When using the ClusterHosted DNS type, Load Balancer IP addresses + // must be provided for the API and internal API load balancers as well as the + // ingress load balancer. + // + // +default={"dnsType": "PlatformDefault"} + // +kubebuilder:default={"dnsType": "PlatformDefault"} + // +openshift:enable:FeatureGate=AzureClusterHostedDNSInstall + // +optional + CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` } // AzureResourceTag is a tag to apply to Azure resources created for the cluster. @@ -745,7 +758,7 @@ type GCPPlatformStatus struct { // // +default={"dnsType": "PlatformDefault"} // +kubebuilder:default={"dnsType": "PlatformDefault"} - // +openshift:enable:FeatureGate=GCPClusterHostedDNS + // +openshift:enable:FeatureGate=GCPClusterHostedDNSInstall // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` @@ -760,7 +773,7 @@ type GCPPlatformStatus struct { // +kubebuilder:validation:MaxItems=8 // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="only 1 endpoint override is permitted per GCP service name" // +optional - // +openshift:enable:FeatureGate=GCPCustomAPIEndpoints + // +openshift:enable:FeatureGate=GCPCustomAPIEndpointsInstall ServiceEndpoints []GCPServiceEndpoint `json:"serviceEndpoints,omitempty"` } diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 70edc1769..788e10479 100644 --- a/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -616,6 +616,11 @@ func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) { *out = make([]AzureResourceTag, len(*in)) copy(*out, *in) } + if in.CloudLoadBalancerConfig != nil { + in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig + *out = new(CloudLoadBalancerConfig) + (*in).DeepCopyInto(*out) + } return } diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index 91881630b..6d756e8f9 100644 --- a/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -361,11 +361,12 @@ infrastructures.config.openshift.io: Capability: "" Category: "" FeatureGates: - - AWSClusterHostedDNS + - AWSClusterHostedDNSInstall + - AzureClusterHostedDNSInstall - DualReplica - DyanmicServiceEndpointIBMCloud - - GCPClusterHostedDNS - - GCPCustomAPIEndpoints + - GCPClusterHostedDNSInstall + - GCPCustomAPIEndpointsInstall - HighlyAvailableArbiter - HighlyAvailableArbiter+DualReplica - NutanixMultiSubnets diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index eb78ad7ca..fe10c58f5 100644 --- 
a/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/openshift/tests-extension/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -399,7 +399,7 @@ func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { var map_ExtraMapping = map[string]string{ "": "ExtraMapping allows specifying a key and CEL expression to evaluate the keys' value. It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token.", "key": "key is a required field that specifies the string to use as the extra attribute key.\n\nkey must be a domain-prefix path (e.g 'example.org/foo'). key must not exceed 510 characters in length. key must contain the '/' character, separating the domain and path characters. key must not be empty.\n\nThe domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. It must not exceed 253 characters in length. It must start and end with an alphanumeric character. It must only contain lower case alphanumeric characters and '-' or '.'. It must not use the reserved domains, or be subdomains of, \"kubernetes.io\", \"k8s.io\", and \"openshift.io\".\n\nThe path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. It must not exceed 256 characters in length.", - "valueExpression": "valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. \"\", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nvalueExpression must not exceed 4096 characters in length. valueExpression must not be empty.", + "valueExpression": "valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. \"\", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nvalueExpression must not exceed 1024 characters in length. valueExpression must not be empty.", } func (ExtraMapping) SwaggerDoc() map[string]string { @@ -477,7 +477,7 @@ var map_TokenClaimMappings = map[string]string{ "username": "username is a required field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.", "groups": "groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. 
When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). For example - '\"example\"' and '\"exampleOne\", \"exampleTwo\", \"exampleThree\"' are valid claim values.", "uid": "uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity.\n\nWhen using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. The current default is to use the 'sub' claim.", - "extra": "extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. key values for extra mappings must be unique. A maximum of 64 extra attribute mappings may be provided.", + "extra": "extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. key values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided.", } func (TokenClaimMappings) SwaggerDoc() map[string]string { @@ -487,7 +487,7 @@ func (TokenClaimMappings) SwaggerDoc() map[string]string { var map_TokenClaimOrExpressionMapping = map[string]string{ "": "TokenClaimOrExpressionMapping allows specifying either a JWT token claim or CEL expression to be used when mapping claims from an authentication token to cluster identities.", "claim": "claim is an optional field for specifying the JWT token claim that is used in the mapping. The value of this claim will be assigned to the field in which this mapping is associated.\n\nPrecisely one of claim or expression must be set. claim must not be specified when expression is set. When specified, claim must be at least 1 character in length and must not exceed 256 characters in length.", - "expression": "expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nPrecisely one of claim or expression must be set. expression must not be specified when claim is set. When specified, expression must be at least 1 character in length and must not exceed 4096 characters in length.", + "expression": "expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nPrecisely one of claim or expression must be set. expression must not be specified when claim is set. 
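Illustrative examples (not taken from this change) of CEL expressions that fit the claim mappings described here; CEL sees the token claims through the 'claims' variable:

package example

// Example CEL expressions for claim mappings; both are well under the
// 1024-character cap and are assumptions for illustration, not API values.
const (
	// Derive the username from the 'sub' claim with a fixed prefix.
	usernameExpression = `'oidc:' + claims.sub`
	// Prefer the 'email' claim when present, otherwise fall back to 'sub'.
	uidExpression = `has(claims.email) ? claims.email : claims.sub`
)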
When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length.", } func (TokenClaimOrExpressionMapping) SwaggerDoc() map[string]string { @@ -504,7 +504,7 @@ func (TokenClaimValidationRule) SwaggerDoc() map[string]string { } var map_TokenIssuer = map[string]string{ - "issuerURL": "issuerURL is a required field that configures the URL used to issue tokens by the identity provider. The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers.\n\nissuerURL must use the 'https' scheme.", + "issuerURL": "issuerURL is a required field that configures the URL used to issue tokens by the identity provider. The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers.\n\nMust be at least 1 character and must not exceed 512 characters in length. Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user.", "audiences": "audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token.\n\naudiences must contain at least one entry and must not exceed ten entries.", "issuerCertificateAuthority": "issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information.\n\nWhen not specified, the system trust is used.\n\nWhen specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap.", } @@ -1480,6 +1480,7 @@ var map_AzurePlatformStatus = map[string]string{ "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.", "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.", "resourceTags": "resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, and DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.", + "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation.
When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", } func (AzurePlatformStatus) SwaggerDoc() map[string]string { diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/doc.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/doc.go new file mode 100644 index 000000000..cc194d24d --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package docker10 is the docker10 version of the API. +package docker10 diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/register.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/register.go new file mode 100644 index 000000000..3d5ad268a --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/register.go @@ -0,0 +1,47 @@ +package docker10 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + GroupName = "image.openshift.io" + LegacyGroupName = "" +) + +// SchemeGroupVersion is group version used to register these objects +var ( + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "1.0"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "1.0"} + + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme + + // Install is a function which adds this version to a scheme + Install = SchemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DockerImage{}, + ) + return nil +} + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &DockerImage{}, + ) + return nil +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/types_docker.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/types_docker.go new file mode 100644 index 000000000..03f0f67fc --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/types_docker.go @@ -0,0 +1,60 @@ +package docker10 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DockerImage is the type representing a container image and its various properties when +// retrieved from the Docker client API. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
+// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type DockerImage struct { + metav1.TypeMeta `json:",inline"` + + ID string `json:"Id"` + Parent string `json:"Parent,omitempty"` + Comment string `json:"Comment,omitempty"` + Created metav1.Time `json:"Created,omitempty"` + Container string `json:"Container,omitempty"` + ContainerConfig DockerConfig `json:"ContainerConfig,omitempty"` + DockerVersion string `json:"DockerVersion,omitempty"` + Author string `json:"Author,omitempty"` + Config *DockerConfig `json:"Config,omitempty"` + Architecture string `json:"Architecture,omitempty"` + Size int64 `json:"Size,omitempty"` +} + +// DockerConfig is the list of configuration options used when creating a container. +type DockerConfig struct { + Hostname string `json:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty"` + User string `json:"User,omitempty"` + Memory int64 `json:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + Tty bool `json:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + Labels map[string]string `json:"Labels,omitempty"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go new file mode 100644 index 000000000..be828dbe4 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go @@ -0,0 +1,114 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by codegen. DO NOT EDIT. + +package docker10 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
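As a usage note (an assumption, not part of the vendored package): the capitalized JSON tags above mirror the Docker client API, so a raw image-config document decodes directly; the import path shown is the upstream module.

package example

import (
	"encoding/json"
	"fmt"

	"github.com/openshift/api/image/docker10"
)

// decodeImage unmarshals a Docker client API image document, relying on the
// capitalized JSON field names declared on DockerImage and DockerConfig.
func decodeImage() error {
	raw := []byte(`{"Id":"sha256:abc123","Architecture":"amd64","Config":{"Env":["PATH=/usr/bin"],"Labels":{"io.openshift.tags":"example"}}}`)
	var img docker10.DockerImage
	if err := json.Unmarshal(raw, &img); err != nil {
		return err
	}
	fmt.Println(img.ID, img.Architecture, img.Config.Labels["io.openshift.tags"])
	return nil
}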
+func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. +func (in *DockerConfig) DeepCopy() *DockerConfig { + if in == nil { + return nil + } + out := new(DockerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImage) DeepCopyInto(out *DockerImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Created.DeepCopyInto(&out.Created) + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(DockerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage. +func (in *DockerImage) DeepCopy() *DockerImage { + if in == nil { + return nil + } + out := new(DockerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DockerImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..e818f784a --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go @@ -0,0 +1,30 @@ +package docker10 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. 
+// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_DockerConfig = map[string]string{ + "": "DockerConfig is the list of configuration options used when creating a container.", +} + +func (DockerConfig) SwaggerDoc() map[string]string { + return map_DockerConfig +} + +var map_DockerImage = map[string]string{ + "": "DockerImage is the type representing a container image and its various properties when retrieved from the Docker client API.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (DockerImage) SwaggerDoc() map[string]string { + return map_DockerImage +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go new file mode 100644 index 000000000..ddeb4403c --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go @@ -0,0 +1,18 @@ +package dockerpre012 + +// DeepCopyInto is manually built to copy the (probably bugged) time.Time +func (in *ImagePre012) DeepCopyInto(out *ImagePre012) { + *out = *in + out.Created = in.Created + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + if *in == nil { + *out = nil + } else { + *out = new(Config) + (*in).DeepCopyInto(*out) + } + } + return +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/doc.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/doc.go new file mode 100644 index 000000000..e4a56260f --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package dockerpre012 is the dockerpre012 version of the API. +package dockerpre012 diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/register.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/register.go new file mode 100644 index 000000000..7ce2adb0a --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/register.go @@ -0,0 +1,46 @@ +package dockerpre012 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + GroupName = "image.openshift.io" + LegacyGroupName = "" +) + +var ( + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "pre012"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "pre012"} + + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme + + // Install is a function which adds this version to a scheme + Install = SchemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
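Both legacy image packages expose the same Install entry point; a minimal consumer sketch (an assumption, not part of this diff; the import paths shown are the upstream module):

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

	"github.com/openshift/api/image/docker10"
	"github.com/openshift/api/image/dockerpre012"
)

// newImageScheme registers both legacy image API versions into one scheme.
func newImageScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	utilruntime.Must(docker10.Install(scheme))
	utilruntime.Must(dockerpre012.Install(scheme))
	return scheme
}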
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DockerImage{}, + ) + return nil +} + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &DockerImage{}, + ) + return nil +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go new file mode 100644 index 000000000..1111892a9 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go @@ -0,0 +1,140 @@ +package dockerpre012 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the +// version of metadata that the container image registry uses to persist metadata. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type DockerImage struct { + metav1.TypeMeta `json:",inline"` + + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created metav1.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig DockerConfig `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *DockerConfig `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// DockerConfig is the list of configuration options used when creating a container. +type DockerConfig struct { + Hostname string `json:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty"` + User string `json:"User,omitempty"` + Memory int64 `json:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + Tty bool `json:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + // This field is not supported in pre012 and will always be empty. 
+ Labels map[string]string `json:"Labels,omitempty"` +} + +// ImagePre012 serves the same purpose as the Image type except that it is for +// earlier versions of the Docker API (pre-012 to be specific) +// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type ImagePre012 struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig Config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// Config is the list of configuration options used when creating a container. +// Config does not contain the options that are specific to starting a container on a +// given host. Those are contained in HostConfig +// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type Config struct { + Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"` + User string `json:"User,omitempty" yaml:"User,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` + MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"` + KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"` + PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"` + ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"` + StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"` + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty" yaml:"Env,omitempty"` + Cmd []string `json:"Cmd" yaml:"Cmd"` + DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty" yaml:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` + VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` + Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"` + Mounts []Mount 
`json:"Mounts,omitempty" yaml:"Mounts,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` +} + +// Mount represents a mount point in the container. +// +// It has been added in the version 1.20 of the Docker API, available since +// Docker 1.8. +// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type Mount struct { + Name string + Source string + Destination string + Driver string + Mode string + RW bool +} + +// Port represents the port number and the protocol, in the form +// <number>/<protocol>. For example: 80/tcp. +// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type Port string diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go new file mode 100644 index 000000000..c3c9b29d2 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go @@ -0,0 +1,217 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by codegen. DO NOT EDIT. + +package dockerpre012 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[Port]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]Mount, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. +func (in *DockerConfig) DeepCopy() *DockerConfig { + if in == nil { + return nil + } + out := new(DockerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImage) DeepCopyInto(out *DockerImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Created.DeepCopyInto(&out.Created) + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(DockerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage. +func (in *DockerImage) DeepCopy() *DockerImage { + if in == nil { + return nil + } + out := new(DockerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DockerImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePre012. +func (in *ImagePre012) DeepCopy() *ImagePre012 { + if in == nil { + return nil + } + out := new(ImagePre012) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mount) DeepCopyInto(out *Mount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount. 
+func (in *Mount) DeepCopy() *Mount { + if in == nil { + return nil + } + out := new(Mount) + in.DeepCopyInto(out) + return out +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..04900e809 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go @@ -0,0 +1,55 @@ +package dockerpre012 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Config = map[string]string{ + "": "Config is the list of configuration options used when creating a container. Config does not contain the options that are specific to starting a container on a given host. Those are contained in HostConfig Exists only for legacy conversion, copy of type from fsouza/go-dockerclient", +} + +func (Config) SwaggerDoc() map[string]string { + return map_Config +} + +var map_DockerConfig = map[string]string{ + "": "DockerConfig is the list of configuration options used when creating a container.", + "Labels": "This field is not supported in pre012 and will always be empty.", +} + +func (DockerConfig) SwaggerDoc() map[string]string { + return map_DockerConfig +} + +var map_DockerImage = map[string]string{ + "": "DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the version of metadata that the container image registry uses to persist metadata.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (DockerImage) SwaggerDoc() map[string]string { + return map_DockerImage +} + +var map_ImagePre012 = map[string]string{ + "": "ImagePre012 serves the same purpose as the Image type except that it is for earlier versions of the Docker API (pre-012 to be specific) Exists only for legacy conversion, copy of type from fsouza/go-dockerclient", +} + +func (ImagePre012) SwaggerDoc() map[string]string { + return map_ImagePre012 +} + +var map_Mount = map[string]string{ + "": "Mount represents a mount point in the container.\n\nIt has been added in the version 1.20 of the Docker API, available since Docker 1.8. 
Exists only for legacy conversion, copy of type from fsouza/go-dockerclient", +} + +func (Mount) SwaggerDoc() map[string]string { + return map_Mount +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/consts.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/consts.go new file mode 100644 index 000000000..11f57a44a --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/consts.go @@ -0,0 +1,69 @@ +package v1 + +import corev1 "k8s.io/api/core/v1" + +const ( + // ManagedByOpenShiftAnnotation indicates that an image is managed by OpenShift's registry. + ManagedByOpenShiftAnnotation = "openshift.io/image.managed" + + // DockerImageRepositoryCheckAnnotation indicates that OpenShift has + // attempted to import tag and image information from an external Docker + // image repository. + DockerImageRepositoryCheckAnnotation = "openshift.io/image.dockerRepositoryCheck" + + // InsecureRepositoryAnnotation may be set true on an image stream to allow insecure access to pull content. + InsecureRepositoryAnnotation = "openshift.io/image.insecureRepository" + + // ExcludeImageSecretAnnotation indicates that a secret should not be returned by imagestream/secrets. + ExcludeImageSecretAnnotation = "openshift.io/image.excludeSecret" + + // DockerImageLayersOrderAnnotation describes the layer order in the docker image. + DockerImageLayersOrderAnnotation = "image.openshift.io/dockerLayersOrder" + + // DockerImageLayersOrderAscending indicates that image layers are sorted in + // the order of their addition (from oldest to latest). + DockerImageLayersOrderAscending = "ascending" + + // DockerImageLayersOrderDescending indicates that layers are sorted in + // reversed order of their addition (from newest to oldest). + DockerImageLayersOrderDescending = "descending" + + // ImporterPreferArchAnnotation represents an architecture that should be + // selected if an image uses a manifest list and it should be + // downconverted. + ImporterPreferArchAnnotation = "importer.image.openshift.io/prefer-arch" + + // ImporterPreferOSAnnotation represents an operating system that should + // be selected if an image uses a manifest list and it should be + // downconverted. + ImporterPreferOSAnnotation = "importer.image.openshift.io/prefer-os" + + // ImageManifestBlobStoredAnnotation indicates that manifest and config blobs of an image are stored on the + // storage of the integrated Docker registry. + ImageManifestBlobStoredAnnotation = "image.openshift.io/manifestBlobStored" + + // DefaultImageTag is used when an image tag is needed and the configuration does not specify a tag to use. + DefaultImageTag = "latest" + + // ResourceImageStreams represents the number of image streams in a project. + ResourceImageStreams corev1.ResourceName = "openshift.io/imagestreams" + + // ResourceImageStreamImages represents the number of unique references to images in all image stream + // statuses of a project. + ResourceImageStreamImages corev1.ResourceName = "openshift.io/images" + + // ResourceImageStreamTags represents the number of unique references to images in all image stream specs + // of a project. + ResourceImageStreamTags corev1.ResourceName = "openshift.io/image-tags" + + // Limit that applies to images. Used with a max["storage"] LimitRangeItem to set + // the maximum size of an image. + LimitTypeImage corev1.LimitType = "openshift.io/Image" + + // Limit that applies to image streams.
Used with a max[resource] LimitRangeItem to set the maximum number + // of a resource, where the resource is one of "openshift.io/images" and "openshift.io/image-tags". + LimitTypeImageStream corev1.LimitType = "openshift.io/ImageStream" + + // The supported type of image signature. + ImageSignatureTypeAtomicImageV1 string = "AtomicImageV1" +) diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/doc.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/doc.go new file mode 100644 index 000000000..e57d45bbf --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/image/apis/image +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=image.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/generated.pb.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/generated.pb.go new file mode 100644 index 000000000..ac776ad64 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/generated.pb.go @@ -0,0 +1,11572 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/image/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *DockerImageReference) Reset() { *m = DockerImageReference{} } +func (*DockerImageReference) ProtoMessage() {} +func (*DockerImageReference) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{0} +} +func (m *DockerImageReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerImageReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerImageReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerImageReference.Merge(m, src) +} +func (m *DockerImageReference) XXX_Size() int { + return m.Size() +} +func (m *DockerImageReference) XXX_DiscardUnknown() { + xxx_messageInfo_DockerImageReference.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerImageReference proto.InternalMessageInfo + +func (m *Image) Reset() { *m = Image{} } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{1} +} +func (m *Image) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Image) XXX_Merge(src proto.Message) { + xxx_messageInfo_Image.Merge(m, src) +} +func (m *Image) XXX_Size() int { + return m.Size() +} +func (m *Image) XXX_DiscardUnknown() { + xxx_messageInfo_Image.DiscardUnknown(m) +} + +var xxx_messageInfo_Image proto.InternalMessageInfo + +func (m *ImageBlobReferences) Reset() { *m = ImageBlobReferences{} } +func (*ImageBlobReferences) ProtoMessage() {} +func (*ImageBlobReferences) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{2} +} +func (m *ImageBlobReferences) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageBlobReferences) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageBlobReferences) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageBlobReferences.Merge(m, src) +} +func (m *ImageBlobReferences) XXX_Size() int { + return m.Size() +} +func (m *ImageBlobReferences) XXX_DiscardUnknown() { + xxx_messageInfo_ImageBlobReferences.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageBlobReferences proto.InternalMessageInfo + +func (m *ImageImportSpec) Reset() { *m = ImageImportSpec{} } +func (*ImageImportSpec) ProtoMessage() {} +func (*ImageImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{3} +} +func (m *ImageImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageImportSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageImportSpec.Merge(m, src) +} +func (m *ImageImportSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageImportSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageImportSpec proto.InternalMessageInfo + +func (m *ImageImportStatus) Reset() { *m = ImageImportStatus{} } +func 
(*ImageImportStatus) ProtoMessage() {} +func (*ImageImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{4} +} +func (m *ImageImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageImportStatus.Merge(m, src) +} +func (m *ImageImportStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageImportStatus proto.InternalMessageInfo + +func (m *ImageLayer) Reset() { *m = ImageLayer{} } +func (*ImageLayer) ProtoMessage() {} +func (*ImageLayer) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{5} +} +func (m *ImageLayer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLayer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLayer) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLayer.Merge(m, src) +} +func (m *ImageLayer) XXX_Size() int { + return m.Size() +} +func (m *ImageLayer) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLayer.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLayer proto.InternalMessageInfo + +func (m *ImageLayerData) Reset() { *m = ImageLayerData{} } +func (*ImageLayerData) ProtoMessage() {} +func (*ImageLayerData) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{6} +} +func (m *ImageLayerData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLayerData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLayerData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLayerData.Merge(m, src) +} +func (m *ImageLayerData) XXX_Size() int { + return m.Size() +} +func (m *ImageLayerData) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLayerData.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLayerData proto.InternalMessageInfo + +func (m *ImageList) Reset() { *m = ImageList{} } +func (*ImageList) ProtoMessage() {} +func (*ImageList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{7} +} +func (m *ImageList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageList.Merge(m, src) +} +func (m *ImageList) XXX_Size() int { + return m.Size() +} +func (m *ImageList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageList proto.InternalMessageInfo + +func (m *ImageLookupPolicy) Reset() { *m = ImageLookupPolicy{} } +func (*ImageLookupPolicy) ProtoMessage() {} +func (*ImageLookupPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{8} +} +func (m *ImageLookupPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*ImageLookupPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLookupPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLookupPolicy.Merge(m, src) +} +func (m *ImageLookupPolicy) XXX_Size() int { + return m.Size() +} +func (m *ImageLookupPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLookupPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLookupPolicy proto.InternalMessageInfo + +func (m *ImageManifest) Reset() { *m = ImageManifest{} } +func (*ImageManifest) ProtoMessage() {} +func (*ImageManifest) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{9} +} +func (m *ImageManifest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageManifest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageManifest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageManifest.Merge(m, src) +} +func (m *ImageManifest) XXX_Size() int { + return m.Size() +} +func (m *ImageManifest) XXX_DiscardUnknown() { + xxx_messageInfo_ImageManifest.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageManifest proto.InternalMessageInfo + +func (m *ImageSignature) Reset() { *m = ImageSignature{} } +func (*ImageSignature) ProtoMessage() {} +func (*ImageSignature) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{10} +} +func (m *ImageSignature) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageSignature) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageSignature.Merge(m, src) +} +func (m *ImageSignature) XXX_Size() int { + return m.Size() +} +func (m *ImageSignature) XXX_DiscardUnknown() { + xxx_messageInfo_ImageSignature.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageSignature proto.InternalMessageInfo + +func (m *ImageStream) Reset() { *m = ImageStream{} } +func (*ImageStream) ProtoMessage() {} +func (*ImageStream) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{11} +} +func (m *ImageStream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStream) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStream.Merge(m, src) +} +func (m *ImageStream) XXX_Size() int { + return m.Size() +} +func (m *ImageStream) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStream.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStream proto.InternalMessageInfo + +func (m *ImageStreamImage) Reset() { *m = ImageStreamImage{} } +func (*ImageStreamImage) ProtoMessage() {} +func (*ImageStreamImage) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{12} +} +func (m *ImageStreamImage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*ImageStreamImage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImage.Merge(m, src) +} +func (m *ImageStreamImage) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImage) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImage.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImage proto.InternalMessageInfo + +func (m *ImageStreamImport) Reset() { *m = ImageStreamImport{} } +func (*ImageStreamImport) ProtoMessage() {} +func (*ImageStreamImport) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{13} +} +func (m *ImageStreamImport) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImport) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImport.Merge(m, src) +} +func (m *ImageStreamImport) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImport) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImport.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImport proto.InternalMessageInfo + +func (m *ImageStreamImportSpec) Reset() { *m = ImageStreamImportSpec{} } +func (*ImageStreamImportSpec) ProtoMessage() {} +func (*ImageStreamImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{14} +} +func (m *ImageStreamImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImportSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImportSpec.Merge(m, src) +} +func (m *ImageStreamImportSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImportSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImportSpec proto.InternalMessageInfo + +func (m *ImageStreamImportStatus) Reset() { *m = ImageStreamImportStatus{} } +func (*ImageStreamImportStatus) ProtoMessage() {} +func (*ImageStreamImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{15} +} +func (m *ImageStreamImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImportStatus.Merge(m, src) +} +func (m *ImageStreamImportStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImportStatus proto.InternalMessageInfo + +func (m *ImageStreamLayers) Reset() { *m = ImageStreamLayers{} } +func (*ImageStreamLayers) ProtoMessage() {} +func (*ImageStreamLayers) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{16} +} +func (m *ImageStreamLayers) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamLayers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamLayers) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamLayers.Merge(m, src) +} +func (m *ImageStreamLayers) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamLayers) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamLayers.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamLayers proto.InternalMessageInfo + +func (m *ImageStreamList) Reset() { *m = ImageStreamList{} } +func (*ImageStreamList) ProtoMessage() {} +func (*ImageStreamList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{17} +} +func (m *ImageStreamList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamList.Merge(m, src) +} +func (m *ImageStreamList) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamList proto.InternalMessageInfo + +func (m *ImageStreamMapping) Reset() { *m = ImageStreamMapping{} } +func (*ImageStreamMapping) ProtoMessage() {} +func (*ImageStreamMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{18} +} +func (m *ImageStreamMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamMapping.Merge(m, src) +} +func (m *ImageStreamMapping) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamMapping) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamMapping proto.InternalMessageInfo + +func (m *ImageStreamSpec) Reset() { *m = ImageStreamSpec{} } +func (*ImageStreamSpec) ProtoMessage() {} +func (*ImageStreamSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{19} +} +func (m *ImageStreamSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamSpec.Merge(m, src) +} +func (m *ImageStreamSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamSpec proto.InternalMessageInfo + +func (m *ImageStreamStatus) Reset() { *m = ImageStreamStatus{} } +func (*ImageStreamStatus) ProtoMessage() {} +func (*ImageStreamStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{20} +} +func (m *ImageStreamStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*ImageStreamStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamStatus.Merge(m, src) +} +func (m *ImageStreamStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamStatus proto.InternalMessageInfo + +func (m *ImageStreamTag) Reset() { *m = ImageStreamTag{} } +func (*ImageStreamTag) ProtoMessage() {} +func (*ImageStreamTag) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{21} +} +func (m *ImageStreamTag) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamTag.Merge(m, src) +} +func (m *ImageStreamTag) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamTag) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamTag.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamTag proto.InternalMessageInfo + +func (m *ImageStreamTagList) Reset() { *m = ImageStreamTagList{} } +func (*ImageStreamTagList) ProtoMessage() {} +func (*ImageStreamTagList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{22} +} +func (m *ImageStreamTagList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamTagList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamTagList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamTagList.Merge(m, src) +} +func (m *ImageStreamTagList) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamTagList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamTagList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamTagList proto.InternalMessageInfo + +func (m *ImageTag) Reset() { *m = ImageTag{} } +func (*ImageTag) ProtoMessage() {} +func (*ImageTag) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{23} +} +func (m *ImageTag) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageTag.Merge(m, src) +} +func (m *ImageTag) XXX_Size() int { + return m.Size() +} +func (m *ImageTag) XXX_DiscardUnknown() { + xxx_messageInfo_ImageTag.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageTag proto.InternalMessageInfo + +func (m *ImageTagList) Reset() { *m = ImageTagList{} } +func (*ImageTagList) ProtoMessage() {} +func (*ImageTagList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{24} +} +func (m *ImageTagList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageTagList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageTagList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageTagList.Merge(m, src) +} +func (m *ImageTagList) XXX_Size() int { + return m.Size() +} +func (m *ImageTagList) XXX_DiscardUnknown() { + 
xxx_messageInfo_ImageTagList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageTagList proto.InternalMessageInfo + +func (m *NamedTagEventList) Reset() { *m = NamedTagEventList{} } +func (*NamedTagEventList) ProtoMessage() {} +func (*NamedTagEventList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{25} +} +func (m *NamedTagEventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedTagEventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedTagEventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedTagEventList.Merge(m, src) +} +func (m *NamedTagEventList) XXX_Size() int { + return m.Size() +} +func (m *NamedTagEventList) XXX_DiscardUnknown() { + xxx_messageInfo_NamedTagEventList.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedTagEventList proto.InternalMessageInfo + +func (m *RepositoryImportSpec) Reset() { *m = RepositoryImportSpec{} } +func (*RepositoryImportSpec) ProtoMessage() {} +func (*RepositoryImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{26} +} +func (m *RepositoryImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RepositoryImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RepositoryImportSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepositoryImportSpec.Merge(m, src) +} +func (m *RepositoryImportSpec) XXX_Size() int { + return m.Size() +} +func (m *RepositoryImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RepositoryImportSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RepositoryImportSpec proto.InternalMessageInfo + +func (m *RepositoryImportStatus) Reset() { *m = RepositoryImportStatus{} } +func (*RepositoryImportStatus) ProtoMessage() {} +func (*RepositoryImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{27} +} +func (m *RepositoryImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RepositoryImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RepositoryImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepositoryImportStatus.Merge(m, src) +} +func (m *RepositoryImportStatus) XXX_Size() int { + return m.Size() +} +func (m *RepositoryImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RepositoryImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_RepositoryImportStatus proto.InternalMessageInfo + +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{28} +} +func (m *SecretList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretList.Merge(m, src) +} +func (m *SecretList) XXX_Size() int { + return m.Size() +} +func (m *SecretList) XXX_DiscardUnknown() { + 
xxx_messageInfo_SecretList.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretList proto.InternalMessageInfo + +func (m *SignatureCondition) Reset() { *m = SignatureCondition{} } +func (*SignatureCondition) ProtoMessage() {} +func (*SignatureCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{29} +} +func (m *SignatureCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureCondition.Merge(m, src) +} +func (m *SignatureCondition) XXX_Size() int { + return m.Size() +} +func (m *SignatureCondition) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureCondition proto.InternalMessageInfo + +func (m *SignatureGenericEntity) Reset() { *m = SignatureGenericEntity{} } +func (*SignatureGenericEntity) ProtoMessage() {} +func (*SignatureGenericEntity) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{30} +} +func (m *SignatureGenericEntity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureGenericEntity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureGenericEntity) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureGenericEntity.Merge(m, src) +} +func (m *SignatureGenericEntity) XXX_Size() int { + return m.Size() +} +func (m *SignatureGenericEntity) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureGenericEntity.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureGenericEntity proto.InternalMessageInfo + +func (m *SignatureIssuer) Reset() { *m = SignatureIssuer{} } +func (*SignatureIssuer) ProtoMessage() {} +func (*SignatureIssuer) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{31} +} +func (m *SignatureIssuer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureIssuer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureIssuer) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureIssuer.Merge(m, src) +} +func (m *SignatureIssuer) XXX_Size() int { + return m.Size() +} +func (m *SignatureIssuer) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureIssuer.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureIssuer proto.InternalMessageInfo + +func (m *SignatureSubject) Reset() { *m = SignatureSubject{} } +func (*SignatureSubject) ProtoMessage() {} +func (*SignatureSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{32} +} +func (m *SignatureSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureSubject.Merge(m, src) +} +func (m *SignatureSubject) XXX_Size() int { + return m.Size() +} +func (m *SignatureSubject) XXX_DiscardUnknown() { + 
xxx_messageInfo_SignatureSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureSubject proto.InternalMessageInfo + +func (m *TagEvent) Reset() { *m = TagEvent{} } +func (*TagEvent) ProtoMessage() {} +func (*TagEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{33} +} +func (m *TagEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagEvent.Merge(m, src) +} +func (m *TagEvent) XXX_Size() int { + return m.Size() +} +func (m *TagEvent) XXX_DiscardUnknown() { + xxx_messageInfo_TagEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_TagEvent proto.InternalMessageInfo + +func (m *TagEventCondition) Reset() { *m = TagEventCondition{} } +func (*TagEventCondition) ProtoMessage() {} +func (*TagEventCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{34} +} +func (m *TagEventCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagEventCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagEventCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagEventCondition.Merge(m, src) +} +func (m *TagEventCondition) XXX_Size() int { + return m.Size() +} +func (m *TagEventCondition) XXX_DiscardUnknown() { + xxx_messageInfo_TagEventCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_TagEventCondition proto.InternalMessageInfo + +func (m *TagImportPolicy) Reset() { *m = TagImportPolicy{} } +func (*TagImportPolicy) ProtoMessage() {} +func (*TagImportPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{35} +} +func (m *TagImportPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagImportPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagImportPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagImportPolicy.Merge(m, src) +} +func (m *TagImportPolicy) XXX_Size() int { + return m.Size() +} +func (m *TagImportPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_TagImportPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_TagImportPolicy proto.InternalMessageInfo + +func (m *TagReference) Reset() { *m = TagReference{} } +func (*TagReference) ProtoMessage() {} +func (*TagReference) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{36} +} +func (m *TagReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagReference.Merge(m, src) +} +func (m *TagReference) XXX_Size() int { + return m.Size() +} +func (m *TagReference) XXX_DiscardUnknown() { + xxx_messageInfo_TagReference.DiscardUnknown(m) +} + +var xxx_messageInfo_TagReference proto.InternalMessageInfo + +func (m *TagReferencePolicy) Reset() { *m = TagReferencePolicy{} } +func (*TagReferencePolicy) ProtoMessage() {} 
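+// Editor's note (illustrative sketch, not part of the generated file): every
+// message in this file carries the same gogo/protobuf plumbing shown above
+// (Reset, ProtoMessage, Descriptor, and the XXX_* wire helpers). Assuming a
+// populated *ImageStream value named stream, a round trip through the wire
+// format via the generated methods would look roughly like:
+//
+//	data, err := stream.Marshal() // delegates to MarshalToSizedBuffer
+//	if err != nil {
+//		// handle the marshalling error
+//	}
+//	decoded := &ImageStream{}
+//	if err := decoded.Unmarshal(data); err != nil {
+//		// handle the unmarshalling error
+//	}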
+func (*TagReferencePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{37} +} +func (m *TagReferencePolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagReferencePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagReferencePolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagReferencePolicy.Merge(m, src) +} +func (m *TagReferencePolicy) XXX_Size() int { + return m.Size() +} +func (m *TagReferencePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_TagReferencePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_TagReferencePolicy proto.InternalMessageInfo + +func init() { + proto.RegisterType((*DockerImageReference)(nil), "git.colasdn.top.openshift.api.image.v1.DockerImageReference") + proto.RegisterType((*Image)(nil), "git.colasdn.top.openshift.api.image.v1.Image") + proto.RegisterType((*ImageBlobReferences)(nil), "git.colasdn.top.openshift.api.image.v1.ImageBlobReferences") + proto.RegisterType((*ImageImportSpec)(nil), "git.colasdn.top.openshift.api.image.v1.ImageImportSpec") + proto.RegisterType((*ImageImportStatus)(nil), "git.colasdn.top.openshift.api.image.v1.ImageImportStatus") + proto.RegisterType((*ImageLayer)(nil), "git.colasdn.top.openshift.api.image.v1.ImageLayer") + proto.RegisterType((*ImageLayerData)(nil), "git.colasdn.top.openshift.api.image.v1.ImageLayerData") + proto.RegisterType((*ImageList)(nil), "git.colasdn.top.openshift.api.image.v1.ImageList") + proto.RegisterType((*ImageLookupPolicy)(nil), "git.colasdn.top.openshift.api.image.v1.ImageLookupPolicy") + proto.RegisterType((*ImageManifest)(nil), "git.colasdn.top.openshift.api.image.v1.ImageManifest") + proto.RegisterType((*ImageSignature)(nil), "git.colasdn.top.openshift.api.image.v1.ImageSignature") + proto.RegisterMapType((map[string]string)(nil), "git.colasdn.top.openshift.api.image.v1.ImageSignature.SignedClaimsEntry") + proto.RegisterType((*ImageStream)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStream") + proto.RegisterType((*ImageStreamImage)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamImage") + proto.RegisterType((*ImageStreamImport)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamImport") + proto.RegisterType((*ImageStreamImportSpec)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamImportSpec") + proto.RegisterType((*ImageStreamImportStatus)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamImportStatus") + proto.RegisterType((*ImageStreamLayers)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamLayers") + proto.RegisterMapType((map[string]ImageLayerData)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamLayers.BlobsEntry") + proto.RegisterMapType((map[string]ImageBlobReferences)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamLayers.ImagesEntry") + proto.RegisterType((*ImageStreamList)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamList") + proto.RegisterType((*ImageStreamMapping)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamMapping") + proto.RegisterType((*ImageStreamSpec)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamSpec") + proto.RegisterType((*ImageStreamStatus)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamStatus") + proto.RegisterType((*ImageStreamTag)(nil), "git.colasdn.top.openshift.api.image.v1.ImageStreamTag") + proto.RegisterType((*ImageStreamTagList)(nil), 
"git.colasdn.top.openshift.api.image.v1.ImageStreamTagList") + proto.RegisterType((*ImageTag)(nil), "git.colasdn.top.openshift.api.image.v1.ImageTag") + proto.RegisterType((*ImageTagList)(nil), "git.colasdn.top.openshift.api.image.v1.ImageTagList") + proto.RegisterType((*NamedTagEventList)(nil), "git.colasdn.top.openshift.api.image.v1.NamedTagEventList") + proto.RegisterType((*RepositoryImportSpec)(nil), "git.colasdn.top.openshift.api.image.v1.RepositoryImportSpec") + proto.RegisterType((*RepositoryImportStatus)(nil), "git.colasdn.top.openshift.api.image.v1.RepositoryImportStatus") + proto.RegisterType((*SecretList)(nil), "git.colasdn.top.openshift.api.image.v1.SecretList") + proto.RegisterType((*SignatureCondition)(nil), "git.colasdn.top.openshift.api.image.v1.SignatureCondition") + proto.RegisterType((*SignatureGenericEntity)(nil), "git.colasdn.top.openshift.api.image.v1.SignatureGenericEntity") + proto.RegisterType((*SignatureIssuer)(nil), "git.colasdn.top.openshift.api.image.v1.SignatureIssuer") + proto.RegisterType((*SignatureSubject)(nil), "git.colasdn.top.openshift.api.image.v1.SignatureSubject") + proto.RegisterType((*TagEvent)(nil), "git.colasdn.top.openshift.api.image.v1.TagEvent") + proto.RegisterType((*TagEventCondition)(nil), "git.colasdn.top.openshift.api.image.v1.TagEventCondition") + proto.RegisterType((*TagImportPolicy)(nil), "git.colasdn.top.openshift.api.image.v1.TagImportPolicy") + proto.RegisterType((*TagReference)(nil), "git.colasdn.top.openshift.api.image.v1.TagReference") + proto.RegisterMapType((map[string]string)(nil), "git.colasdn.top.openshift.api.image.v1.TagReference.AnnotationsEntry") + proto.RegisterType((*TagReferencePolicy)(nil), "git.colasdn.top.openshift.api.image.v1.TagReferencePolicy") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/image/v1/generated.proto", fileDescriptor_650a0b34f65fde60) +} + +var fileDescriptor_650a0b34f65fde60 = []byte{ + // 2691 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7, + 0x15, 0xf6, 0xf2, 0x4f, 0xd4, 0x13, 0x25, 0x59, 0x63, 0xcb, 0x61, 0x68, 0x47, 0x92, 0xd7, 0xb5, + 0xe1, 0x34, 0x0e, 0x19, 0xa9, 0x4e, 0x2a, 0xbb, 0x40, 0x1d, 0xd3, 0x74, 0x0d, 0xb6, 0x62, 0xac, + 0x8c, 0x58, 0xa3, 0x35, 0x5c, 0xa0, 0xab, 0xe5, 0x68, 0xb5, 0x15, 0xb9, 0xcb, 0xee, 0x2e, 0x95, + 0xc8, 0x68, 0x81, 0xa2, 0x28, 0x82, 0x1c, 0x7a, 0x68, 0xcf, 0x39, 0x16, 0x41, 0x51, 0x14, 0xe8, + 0xa5, 0x68, 0xd0, 0x53, 0x2f, 0x4d, 0x01, 0xa3, 0xa7, 0x20, 0xe8, 0x21, 0x97, 0x0a, 0xb1, 0xda, + 0x73, 0x6f, 0xbd, 0xf8, 0x54, 0xcc, 0xcf, 0xfe, 0x72, 0x29, 0xed, 0xaa, 0x16, 0xdb, 0xdc, 0xc8, + 0x79, 0xef, 0x7d, 0x6f, 0xe6, 0xbd, 0x37, 0xef, 0xbd, 0x99, 0x59, 0x58, 0xd6, 0x74, 0x67, 0x7b, + 0xb0, 0x59, 0x55, 0xcd, 0x5e, 0xcd, 0xec, 0x13, 0xc3, 0xde, 0xd6, 0xb7, 0x9c, 0x9a, 0xd2, 0xd7, + 0x6b, 0x7a, 0x4f, 0xd1, 0x48, 0x6d, 0x77, 0xb9, 0xa6, 0x11, 0x83, 0x58, 0x8a, 0x43, 0x3a, 0xd5, + 0xbe, 0x65, 0x3a, 0x26, 0xba, 0xe8, 0x8b, 0x54, 0x3d, 0x91, 0xaa, 0xd2, 0xd7, 0xab, 0x4c, 0xa4, + 0xba, 0xbb, 0x5c, 0x79, 0x35, 0x80, 0xaa, 0x99, 0x9a, 0x59, 0x63, 0x92, 0x9b, 0x83, 0x2d, 0xf6, + 0x8f, 0xfd, 0x61, 0xbf, 0x38, 0x62, 0x45, 0xde, 0x59, 0xb5, 0xab, 0xba, 0xc9, 0xd4, 0xaa, 0xa6, + 0x15, 0xa7, 0xb5, 0x72, 0xdd, 0xe7, 0xe9, 0x29, 0xea, 0xb6, 0x6e, 0x10, 0x6b, 0xaf, 0xd6, 0xdf, + 0xd1, 0xe8, 0x80, 0x5d, 0xeb, 0x11, 0x47, 0x89, 0x93, 0xaa, 0x8d, 0x92, 0xb2, 0x06, 0x86, 0xa3, + 0xf7, 0xc8, 0x90, 0xc0, 0x1b, 0x47, 0x09, 0xd8, 0xea, 0x36, 0xe9, 0x29, 0x51, 0x39, 0xf9, 0x53, 
+ 0x09, 0xce, 0x36, 0x4c, 0x75, 0x87, 0x58, 0x4d, 0x6a, 0x04, 0x4c, 0xb6, 0x88, 0x45, 0x0c, 0x95, + 0xa0, 0x6b, 0x50, 0xb4, 0x88, 0xa6, 0xdb, 0x8e, 0xb5, 0x57, 0x96, 0x96, 0xa4, 0xab, 0x93, 0xf5, + 0xd3, 0x4f, 0xf6, 0x17, 0x4f, 0x1d, 0xec, 0x2f, 0x16, 0xb1, 0x18, 0xc7, 0x1e, 0x07, 0xaa, 0xc1, + 0xa4, 0xa1, 0xf4, 0x88, 0xdd, 0x57, 0x54, 0x52, 0xce, 0x30, 0xf6, 0x39, 0xc1, 0x3e, 0xf9, 0x96, + 0x4b, 0xc0, 0x3e, 0x0f, 0x5a, 0x82, 0x1c, 0xfd, 0x53, 0xce, 0x32, 0xde, 0x92, 0xe0, 0xcd, 0x51, + 0x5e, 0xcc, 0x28, 0xe8, 0x25, 0xc8, 0x3a, 0x8a, 0x56, 0xce, 0x31, 0x86, 0x29, 0xc1, 0x90, 0x6d, + 0x2b, 0x1a, 0xa6, 0xe3, 0xa8, 0x02, 0x19, 0xbd, 0x51, 0xce, 0x33, 0x2a, 0x08, 0x6a, 0xa6, 0xd9, + 0xc0, 0x19, 0xbd, 0x21, 0xff, 0xad, 0x08, 0x79, 0xb6, 0x1c, 0xf4, 0x7d, 0x28, 0x52, 0x13, 0x77, + 0x14, 0x47, 0x61, 0xab, 0x98, 0x5a, 0x79, 0xad, 0xca, 0x2d, 0x55, 0x0d, 0x5a, 0xaa, 0xda, 0xdf, + 0xd1, 0xe8, 0x80, 0x5d, 0xa5, 0xdc, 0xd5, 0xdd, 0xe5, 0xea, 0xfd, 0xcd, 0x1f, 0x10, 0xd5, 0x69, + 0x11, 0x47, 0xa9, 0x23, 0x81, 0x0e, 0xfe, 0x18, 0xf6, 0x50, 0xd1, 0x3a, 0x9c, 0xed, 0xc4, 0xd8, + 0x4f, 0x18, 0xe1, 0x82, 0x90, 0x8d, 0xb5, 0x31, 0x8e, 0x95, 0x44, 0x3f, 0x82, 0x33, 0x81, 0xf1, + 0x96, 0x3b, 0xfd, 0x2c, 0x9b, 0xfe, 0xab, 0x23, 0xa7, 0x2f, 0x1c, 0x5d, 0xc5, 0xca, 0x3b, 0x77, + 0xdf, 0x75, 0x88, 0x61, 0xeb, 0xa6, 0x51, 0x3f, 0x2f, 0xf4, 0x9f, 0x69, 0x0c, 0x23, 0xe2, 0x38, + 0x35, 0x68, 0x13, 0x2a, 0x31, 0xc3, 0x0f, 0x88, 0x45, 0xf1, 0x84, 0x37, 0x64, 0x81, 0x5a, 0x69, + 0x8c, 0xe4, 0xc4, 0x87, 0xa0, 0xa0, 0x56, 0x78, 0x85, 0x8a, 0xa1, 0x6f, 0x11, 0xdb, 0x11, 0xce, + 0x8c, 0x9d, 0xb2, 0x60, 0xc1, 0x71, 0x72, 0x68, 0x17, 0xe6, 0x02, 0xc3, 0x6b, 0xca, 0x1e, 0xb1, + 0xec, 0x72, 0x61, 0x29, 0xcb, 0xcc, 0x75, 0xe4, 0xa6, 0xaf, 0xfa, 0x52, 0xf5, 0x17, 0x85, 0xee, + 0xb9, 0x46, 0x14, 0x0f, 0x0f, 0xab, 0x40, 0x04, 0xc0, 0xd6, 0x35, 0x43, 0x71, 0x06, 0x16, 0xb1, + 0xcb, 0x13, 0x4c, 0xe1, 0x72, 0x52, 0x85, 0x1b, 0xae, 0xa4, 0x1f, 0x5f, 0xde, 0x90, 0x8d, 0x03, + 0xc0, 0xe8, 0x3e, 0xcc, 0x07, 0x74, 0xfb, 0x4c, 0xe5, 0xe2, 0x52, 0xf6, 0x6a, 0xa9, 0xfe, 0xe2, + 0xc1, 0xfe, 0xe2, 0x7c, 0x23, 0x8e, 0x01, 0xc7, 0xcb, 0xa1, 0x6d, 0xb8, 0x10, 0x63, 0xc6, 0x16, + 0xe9, 0xe8, 0x4a, 0x7b, 0xaf, 0x4f, 0xca, 0x93, 0xcc, 0x0f, 0x5f, 0x12, 0xd3, 0xba, 0xd0, 0x38, + 0x84, 0x17, 0x1f, 0x8a, 0x84, 0xee, 0x85, 0x3c, 0x73, 0xc7, 0x34, 0xb6, 0x74, 0xad, 0x0c, 0x0c, + 0x3e, 0xce, 0xd4, 0x9c, 0x01, 0x0f, 0xcb, 0xa0, 0x9f, 0x4a, 0xa1, 0x6d, 0xe6, 0x6a, 0xb2, 0xcb, + 0x53, 0xcc, 0xea, 0xaf, 0x25, 0xb5, 0xba, 0x2b, 0x18, 0xbb, 0x31, 0x3d, 0x54, 0x1c, 0xab, 0x4b, + 0xfe, 0x58, 0x82, 0x33, 0x6c, 0xa8, 0xde, 0x35, 0x37, 0xbd, 0xfd, 0x6a, 0xa3, 0x55, 0x28, 0x31, + 0x2d, 0x2d, 0xdd, 0xb6, 0x75, 0x43, 0x63, 0x3b, 0xb5, 0x58, 0x3f, 0x2b, 0x34, 0x94, 0x9a, 0x01, + 0x1a, 0x0e, 0x71, 0x22, 0x19, 0x0a, 0x5d, 0x1e, 0xae, 0xd2, 0x52, 0x96, 0x26, 0xb2, 0x83, 0xfd, + 0xc5, 0x82, 0x08, 0x38, 0x41, 0xa1, 0x3c, 0x2a, 0x37, 0x1c, 0x4f, 0x29, 0x8c, 0x47, 0x58, 0x4a, + 0x50, 0xd0, 0x2b, 0x30, 0xd9, 0xf3, 0x4c, 0x92, 0x63, 0x50, 0xd3, 0x34, 0xf5, 0xfa, 0x2b, 0xf2, + 0xe9, 0xf2, 0x5f, 0xb2, 0x30, 0xcb, 0xe6, 0xd4, 0xec, 0xf5, 0x4d, 0xcb, 0xd9, 0xe8, 0x13, 0x15, + 0xdd, 0x85, 0xdc, 0x96, 0x65, 0xf6, 0x44, 0x8e, 0xbc, 0x14, 0x48, 0x32, 0x55, 0x5a, 0xd8, 0xfc, + 0x8c, 0xe8, 0x2d, 0xdb, 0xcf, 0xd9, 0xdf, 0xb0, 0xcc, 0x1e, 0x66, 0xe2, 0xe8, 0x4d, 0xc8, 0x38, + 0x26, 0x9b, 0xe7, 0xd4, 0xca, 0xd5, 0x38, 0x90, 0x35, 0x53, 0x55, 0xba, 0x51, 0xa4, 0x02, 0x4d, + 0xdd, 0x6d, 0x13, 0x67, 0x1c, 0x13, 0x75, 0xa9, 0x2d, 0xe9, 0xb4, 0xd6, 0xcd, 0xae, 0xae, 0xee, + 0x89, 0xac, 0xb7, 0x92, 
0xc0, 0xbf, 0x6d, 0x45, 0x6b, 0x06, 0x24, 0x83, 0xf6, 0xf7, 0x47, 0x71, + 0x08, 0x1d, 0xbd, 0x0b, 0xb3, 0x96, 0x3b, 0x0d, 0xa1, 0x30, 0xcf, 0x14, 0xbe, 0x9e, 0x4c, 0x21, + 0x0e, 0x0b, 0xd7, 0x5f, 0x10, 0x3a, 0x67, 0x23, 0x04, 0x1c, 0x55, 0x83, 0x6e, 0xc3, 0xac, 0x6e, + 0xa8, 0xdd, 0x41, 0xc7, 0x4f, 0x7f, 0x39, 0x16, 0x36, 0x1e, 0x44, 0x33, 0x4c, 0xc6, 0x51, 0x7e, + 0xf9, 0x77, 0x19, 0x98, 0x0b, 0xfa, 0xd1, 0x51, 0x9c, 0x81, 0x8d, 0xda, 0x50, 0xb0, 0xd9, 0x2f, + 0xe1, 0xcb, 0x6b, 0xc9, 0xea, 0x1d, 0x97, 0xae, 0xcf, 0x08, 0xed, 0x05, 0xfe, 0x1f, 0x0b, 0x2c, + 0xd4, 0x84, 0x3c, 0x5b, 0xb7, 0xe7, 0xdb, 0x84, 0xfb, 0xad, 0x3e, 0x79, 0xb0, 0xbf, 0xc8, 0x6b, + 0x31, 0xe6, 0x08, 0x6e, 0x5d, 0xcf, 0x8e, 0xa8, 0xeb, 0xdf, 0x8d, 0x86, 0x72, 0x1a, 0x6d, 0x5e, + 0xcf, 0x11, 0x1b, 0xf8, 0xef, 0x49, 0x00, 0x7e, 0xfe, 0xf6, 0x5a, 0x10, 0x69, 0x64, 0x0b, 0x72, + 0x19, 0x72, 0xb6, 0xfe, 0x98, 0x2f, 0x3a, 0xeb, 0x83, 0x33, 0xf1, 0x0d, 0xfd, 0x31, 0xc1, 0x8c, + 0x4c, 0x9b, 0x9f, 0x9e, 0x97, 0x3c, 0xb3, 0xe1, 0xe6, 0xc7, 0xcf, 0x94, 0x3e, 0x8f, 0xdc, 0x81, + 0x19, 0x7f, 0x1e, 0x0d, 0x5a, 0x75, 0x2f, 0x0a, 0x4d, 0x12, 0xd3, 0x34, 0x7d, 0xa4, 0x96, 0x4c, + 0x02, 0x2d, 0x7f, 0x94, 0x60, 0x92, 0xab, 0xd1, 0x6d, 0x07, 0x3d, 0x1a, 0xea, 0x84, 0xaa, 0xc9, + 0x22, 0x83, 0x4a, 0xb3, 0x3e, 0xc8, 0xeb, 0xff, 0xdc, 0x91, 0x40, 0x17, 0xd4, 0x82, 0xbc, 0xee, + 0x90, 0x9e, 0x5d, 0xce, 0xa4, 0xf4, 0xd8, 0xb4, 0x00, 0xcd, 0x37, 0xa9, 0x38, 0xe6, 0x28, 0xf2, + 0xaa, 0x88, 0xec, 0x35, 0xd3, 0xdc, 0x19, 0xf4, 0xc5, 0x96, 0xb9, 0x04, 0xf9, 0x2e, 0x4d, 0x1f, + 0x22, 0xbf, 0x7a, 0x92, 0x2c, 0xa7, 0x60, 0x4e, 0x93, 0x7f, 0x95, 0x81, 0xe9, 0x70, 0x77, 0x70, + 0x05, 0x0a, 0x1d, 0x5d, 0xa3, 0x1b, 0x8c, 0x3b, 0xda, 0x0b, 0xf1, 0x06, 0x1b, 0xc5, 0x82, 0x9a, + 0xda, 0xbe, 0x34, 0xed, 0xbb, 0xb1, 0x45, 0xdd, 0xc4, 0xa6, 0x95, 0xf5, 0xd3, 0x4e, 0x2b, 0x40, + 0xc3, 0x21, 0x4e, 0x2a, 0xa9, 0x58, 0xea, 0xb6, 0xee, 0x10, 0x95, 0x56, 0x64, 0xd1, 0x55, 0x79, + 0x92, 0xb7, 0x03, 0x34, 0x1c, 0xe2, 0xa4, 0x5d, 0xaf, 0x69, 0x47, 0xbb, 0xde, 0xfb, 0x1b, 0x38, + 0x63, 0xda, 0xe8, 0x65, 0x98, 0xd8, 0x55, 0x2c, 0x5d, 0x31, 0x9c, 0x72, 0x81, 0x31, 0xcc, 0x0a, + 0x86, 0x89, 0x07, 0x7c, 0x18, 0xbb, 0x74, 0xf9, 0xf7, 0x05, 0x11, 0x81, 0x5e, 0x57, 0x30, 0x86, + 0x4e, 0x79, 0x09, 0x72, 0x8e, 0x6f, 0x5b, 0x6f, 0xbf, 0x31, 0xb3, 0x32, 0x0a, 0xba, 0x0c, 0x13, + 0xaa, 0x69, 0x38, 0xc4, 0x70, 0x98, 0x31, 0x4b, 0xf5, 0x29, 0x3a, 0xfb, 0x3b, 0x7c, 0x08, 0xbb, + 0x34, 0xa4, 0x03, 0xa8, 0xa6, 0xd1, 0xd1, 0x1d, 0xdd, 0x34, 0xdc, 0x1c, 0x91, 0x24, 0x61, 0x7b, + 0x8b, 0xbd, 0xe3, 0x4a, 0xfb, 0x33, 0xf6, 0x86, 0x6c, 0x1c, 0x00, 0x47, 0x5f, 0x83, 0x69, 0x26, + 0xde, 0xec, 0x10, 0xc3, 0xd1, 0x9d, 0x3d, 0x61, 0xfa, 0x79, 0x21, 0xc6, 0x43, 0xcd, 0x25, 0xe2, + 0x30, 0x2f, 0xfa, 0x31, 0x94, 0x68, 0x1b, 0x47, 0x3a, 0x77, 0xba, 0x8a, 0xde, 0x73, 0x5b, 0xd2, + 0x3b, 0xa9, 0x3b, 0x44, 0x36, 0x71, 0x17, 0xe5, 0xae, 0xe1, 0x58, 0x81, 0xe2, 0x16, 0x24, 0xe1, + 0x90, 0x3a, 0xf4, 0x36, 0x4c, 0xa8, 0x16, 0xa1, 0x67, 0xbd, 0xf2, 0x04, 0x73, 0xe8, 0x97, 0x93, + 0x39, 0xb4, 0xad, 0xf7, 0x88, 0xb0, 0x3c, 0x17, 0xc7, 0x2e, 0x0e, 0x4d, 0x22, 0xba, 0x6d, 0x0f, + 0x48, 0xa7, 0xbe, 0x57, 0x2e, 0x26, 0xae, 0xcc, 0xde, 0x42, 0x9a, 0x54, 0xd6, 0xaa, 0x97, 0x68, + 0x12, 0x69, 0x0a, 0x1c, 0xec, 0x21, 0xa2, 0xef, 0xb9, 0xe8, 0x6d, 0x93, 0xf5, 0xa0, 0x53, 0x2b, + 0x5f, 0x49, 0x83, 0xbe, 0x31, 0x60, 0x51, 0x17, 0x84, 0x6f, 0x9b, 0xd8, 0x83, 0xac, 0xdc, 0x82, + 0xb9, 0x21, 0x43, 0xa2, 0xd3, 0x90, 0xdd, 0x21, 0xe2, 0x84, 0x8b, 0xe9, 0x4f, 0x74, 0x16, 0xf2, + 0xbb, 0x4a, 0x77, 0x20, 0xe2, 0x14, 0xf3, 0x3f, 
0x37, 0x33, 0xab, 0x12, 0xcd, 0x2d, 0x53, 0xdc, + 0x33, 0x8e, 0x45, 0x94, 0xde, 0x18, 0xb6, 0x4c, 0x1b, 0x72, 0x76, 0x9f, 0xa8, 0xa2, 0xea, 0xae, + 0x24, 0x8e, 0x1c, 0x36, 0x3f, 0xda, 0xd8, 0xf9, 0xdb, 0x8c, 0xfe, 0xc3, 0x0c, 0x0d, 0x3d, 0xf2, + 0x5a, 0x04, 0xde, 0x5d, 0x5d, 0x4f, 0x89, 0x7b, 0x68, 0xab, 0x20, 0xff, 0x59, 0x82, 0xd3, 0x01, + 0xee, 0x71, 0x9d, 0xc3, 0x5b, 0xc7, 0xed, 0x50, 0xfc, 0x0a, 0x14, 0xe8, 0x52, 0xe4, 0x3f, 0xb8, + 0xcd, 0x95, 0xbb, 0x0a, 0xda, 0x62, 0x8d, 0x61, 0x19, 0x0f, 0x43, 0x1e, 0x5f, 0x4d, 0xe7, 0x19, + 0xbf, 0xa1, 0x8f, 0xf5, 0xfb, 0x66, 0xc4, 0xef, 0x37, 0x8f, 0x85, 0x7e, 0xb8, 0xf7, 0x7f, 0x96, + 0x81, 0xf9, 0xd8, 0x19, 0xd1, 0x3a, 0xcc, 0x7b, 0x6f, 0x66, 0xb9, 0xa2, 0x8f, 0xc0, 0x79, 0xb0, + 0xa0, 0x22, 0x0d, 0xc0, 0x22, 0x7d, 0xd3, 0xd6, 0x1d, 0xd3, 0xda, 0x13, 0x76, 0xf8, 0x6a, 0x82, + 0x99, 0x62, 0x4f, 0x28, 0x60, 0x86, 0x19, 0x6a, 0x68, 0x9f, 0x82, 0x03, 0xd0, 0xe8, 0x21, 0x9d, + 0x90, 0xa2, 0x11, 0x6a, 0x8e, 0x6c, 0x9a, 0xed, 0x15, 0xc4, 0xf7, 0x17, 0x41, 0x91, 0xb0, 0x40, + 0x94, 0x3f, 0xca, 0xc0, 0x0b, 0x23, 0x4c, 0x87, 0x70, 0xc8, 0x10, 0xb4, 0x0f, 0x4b, 0xe5, 0x06, + 0x7e, 0x00, 0x8c, 0x18, 0x4d, 0x8f, 0x31, 0xda, 0x8d, 0xe3, 0x18, 0x4d, 0x78, 0xf7, 0x10, 0xb3, + 0x3d, 0x8a, 0x98, 0xed, 0x7a, 0x4a, 0xb3, 0x45, 0xe2, 0x27, 0x62, 0xb8, 0x0f, 0x73, 0xa1, 0x7d, + 0x27, 0x6e, 0x5a, 0x4e, 0x7e, 0xdf, 0x75, 0x20, 0xbf, 0xd9, 0x35, 0x37, 0xdd, 0x06, 0xf6, 0x56, + 0x3a, 0x9f, 0xf0, 0x69, 0x56, 0xeb, 0x14, 0x81, 0x17, 0x68, 0x2f, 0xab, 0xb0, 0x31, 0xcc, 0xc1, + 0xd1, 0x76, 0xc4, 0x76, 0x6f, 0x1e, 0x4b, 0x0d, 0x37, 0x19, 0xd7, 0x33, 0xc2, 0x8e, 0x95, 0x1d, + 0x00, 0x7f, 0x36, 0x31, 0x55, 0xee, 0x5e, 0xb0, 0xca, 0xa5, 0xb8, 0xb6, 0xf2, 0x8e, 0x2c, 0x81, + 0xc2, 0x58, 0xf9, 0xa1, 0xa8, 0x8b, 0x23, 0xb5, 0xad, 0x85, 0xb5, 0xbd, 0x91, 0x38, 0x39, 0x87, + 0x2e, 0x5a, 0x82, 0xb5, 0xf8, 0x63, 0x49, 0x5c, 0x62, 0x08, 0xcb, 0x9c, 0xfc, 0x11, 0x67, 0x23, + 0x7c, 0xc4, 0x49, 0xbb, 0x6b, 0xe3, 0x0f, 0x3a, 0xff, 0x94, 0x00, 0x05, 0xb8, 0x5a, 0x4a, 0xbf, + 0xaf, 0x1b, 0xda, 0x17, 0xae, 0x5c, 0x1e, 0x71, 0xa8, 0x97, 0x7f, 0x93, 0x09, 0x79, 0x8b, 0xd5, + 0x03, 0x03, 0x4a, 0xdd, 0xc0, 0xf1, 0x2e, 0x6d, 0x2f, 0x12, 0x3c, 0x1a, 0xfa, 0xed, 0x70, 0x70, + 0x14, 0x87, 0xf0, 0xd1, 0x46, 0xe8, 0x1a, 0xd5, 0x4f, 0x6e, 0xe2, 0x58, 0xf8, 0x92, 0x80, 0x98, + 0x6f, 0xc4, 0x31, 0xe1, 0x78, 0x59, 0xf4, 0x36, 0xe4, 0x1c, 0x45, 0x73, 0x63, 0xa2, 0x96, 0xf2, + 0xd6, 0x28, 0x70, 0x08, 0x52, 0x34, 0x1b, 0x33, 0x28, 0xf9, 0xd7, 0xe1, 0xce, 0x43, 0x14, 0x8d, + 0x13, 0x99, 0x3d, 0x81, 0xf3, 0xfd, 0xc1, 0x66, 0x57, 0x57, 0x63, 0xa5, 0x84, 0x37, 0x2f, 0x09, + 0xe8, 0xf3, 0xeb, 0xa3, 0x59, 0xf1, 0x61, 0x38, 0xe8, 0x41, 0xc8, 0x48, 0x49, 0x3c, 0xfc, 0x96, + 0xd2, 0x23, 0x9d, 0xb6, 0xa2, 0xdd, 0xdd, 0x25, 0x86, 0x43, 0xf7, 0x62, 0xac, 0xa5, 0x3e, 0xc8, + 0xb9, 0xa7, 0x58, 0x66, 0xa9, 0xb6, 0x32, 0x8e, 0x8d, 0xf3, 0x4d, 0x1e, 0xe9, 0x7c, 0xdb, 0xa4, + 0x76, 0xf8, 0x44, 0xe8, 0xae, 0x6b, 0x05, 0x40, 0xbc, 0xc7, 0xe9, 0xa6, 0x21, 0xee, 0x0f, 0x3c, + 0xed, 0xf7, 0x3c, 0x0a, 0x0e, 0x70, 0x0d, 0x6d, 0x9b, 0xc2, 0x09, 0x6f, 0x9b, 0xed, 0x98, 0xc3, + 0xf6, 0xf5, 0x64, 0xcb, 0x66, 0xde, 0x4b, 0x7e, 0xd6, 0xf6, 0x52, 0x52, 0xfe, 0xb9, 0x74, 0xf0, + 0x7f, 0x0d, 0xa7, 0xd6, 0xb6, 0xa2, 0x8d, 0xa1, 0x48, 0x3c, 0x08, 0x17, 0x89, 0xe5, 0x74, 0x45, + 0xa2, 0xad, 0x68, 0x23, 0xea, 0xc4, 0xe7, 0x19, 0x28, 0x32, 0xc6, 0xf1, 0x04, 0x79, 0x2b, 0x74, + 0x0a, 0x49, 0x1d, 0xe5, 0xc5, 0xc8, 0xc1, 0xe3, 0x3b, 0xc7, 0x38, 0x70, 0x0e, 0xa7, 0x00, 0x38, + 0xec, 0x5e, 0x3a, 0xf7, 0xdf, 0xde, 0x4b, 0xcb, 0x7f, 0x92, 0xa0, 0xe4, 
0x9a, 0x78, 0x0c, 0x91, + 0xb2, 0x1e, 0x8e, 0x94, 0x57, 0x92, 0xce, 0x7c, 0x74, 0x8c, 0xfc, 0x4b, 0x82, 0xb9, 0x21, 0xab, + 0xb9, 0x95, 0x59, 0x1a, 0x71, 0xdd, 0x7e, 0x8c, 0x69, 0xb8, 0xf0, 0xf1, 0xd3, 0x88, 0x24, 0x8c, + 0xec, 0xc9, 0x25, 0x0c, 0xf9, 0xfd, 0x2c, 0x9c, 0x8d, 0x3b, 0xf5, 0x3d, 0xaf, 0xd7, 0xac, 0xe8, + 0x5b, 0x54, 0x66, 0xdc, 0x6f, 0x51, 0xb9, 0xff, 0xd9, 0x5b, 0x54, 0x36, 0xe5, 0x5b, 0xd4, 0xfb, + 0x19, 0x38, 0x17, 0x7f, 0x96, 0x3c, 0xa1, 0x07, 0x29, 0xff, 0x14, 0x9a, 0x79, 0xfe, 0xa7, 0x50, + 0x74, 0x13, 0x66, 0x94, 0x0e, 0x0f, 0x33, 0xa5, 0x4b, 0x3b, 0x0e, 0x16, 0xc7, 0x93, 0x75, 0x74, + 0xb0, 0xbf, 0x38, 0x73, 0x3b, 0x44, 0xc1, 0x11, 0x4e, 0xf9, 0xb7, 0x12, 0xc0, 0x06, 0x51, 0x2d, + 0xe2, 0x8c, 0x21, 0x8b, 0xdc, 0x0a, 0x6f, 0xdf, 0x4a, 0x5c, 0xa8, 0xf3, 0xc9, 0x8c, 0x48, 0x1a, + 0x9f, 0x66, 0x01, 0x0d, 0xdf, 0x8b, 0xa3, 0x9b, 0xe2, 0xae, 0x9e, 0xa7, 0x8d, 0x2b, 0xc1, 0xbb, + 0xfa, 0x67, 0xfb, 0x8b, 0xe7, 0x86, 0x25, 0x02, 0xb7, 0xf8, 0x6b, 0x9e, 0xc3, 0xf9, 0x4d, 0xff, + 0xf5, 0xb0, 0x0b, 0x9f, 0xed, 0x2f, 0xc6, 0x7c, 0x37, 0x55, 0xf5, 0x90, 0x22, 0x8e, 0xd6, 0x60, + 0xba, 0xab, 0xd8, 0xce, 0xba, 0x65, 0x6e, 0x92, 0xb6, 0x2e, 0xbe, 0x18, 0x4a, 0x77, 0x97, 0xed, + 0xdd, 0xd6, 0xaf, 0x05, 0x81, 0x70, 0x18, 0x17, 0xed, 0x02, 0xa2, 0x03, 0x6d, 0x4b, 0x31, 0x6c, + 0xbe, 0x24, 0xaa, 0x2d, 0x97, 0x5a, 0x5b, 0x45, 0x68, 0x43, 0x6b, 0x43, 0x68, 0x38, 0x46, 0x03, + 0xba, 0x02, 0x05, 0x8b, 0x28, 0xb6, 0x69, 0x88, 0xb7, 0x05, 0x2f, 0x26, 0x31, 0x1b, 0xc5, 0x82, + 0x8a, 0x5e, 0x86, 0x89, 0x1e, 0xb1, 0x6d, 0x5a, 0xec, 0x22, 0xcf, 0x3b, 0x2d, 0x3e, 0x8c, 0x5d, + 0xba, 0xfc, 0x9e, 0x04, 0xbe, 0x8b, 0x58, 0x1f, 0xa9, 0xab, 0x77, 0xf9, 0x9b, 0xc4, 0x2a, 0x94, + 0x4c, 0x4b, 0x53, 0x0c, 0xfd, 0x31, 0x6f, 0x3a, 0xa5, 0xf0, 0xd3, 0xd3, 0xfd, 0x00, 0x0d, 0x87, + 0x38, 0x69, 0xb3, 0xaa, 0x9a, 0xbd, 0x9e, 0x69, 0xd0, 0x1a, 0x23, 0x5c, 0x1b, 0xc8, 0xd0, 0x2e, + 0x05, 0x07, 0xb8, 0xe4, 0x0f, 0x25, 0x98, 0x8d, 0xdc, 0xfe, 0xa3, 0x5f, 0x4a, 0x70, 0xce, 0x8e, + 0x9d, 0x9c, 0xd8, 0x1f, 0x37, 0xd2, 0x5c, 0xfa, 0x87, 0x00, 0xea, 0x0b, 0x62, 0x3e, 0x23, 0x56, + 0x8f, 0x47, 0x28, 0x96, 0xff, 0x2e, 0xc1, 0xe9, 0xe8, 0x3b, 0xc2, 0xff, 0xe3, 0x44, 0xd1, 0xeb, + 0x30, 0xc5, 0x4f, 0x5a, 0xdf, 0x22, 0x7b, 0xcd, 0x86, 0xf0, 0xc2, 0x19, 0x01, 0x36, 0xb5, 0xee, + 0x93, 0x70, 0x90, 0x4f, 0xfe, 0x79, 0x06, 0x8a, 0x6e, 0x7d, 0x45, 0xdf, 0xf6, 0xdf, 0x85, 0xa4, + 0xd4, 0xd1, 0xed, 0x05, 0xdd, 0xd0, 0xdb, 0xd0, 0xf3, 0xff, 0x10, 0xee, 0x92, 0xdb, 0xdc, 0xf1, + 0x83, 0x68, 0xfc, 0xcd, 0x43, 0xf8, 0x0c, 0x95, 0x4b, 0x72, 0x86, 0x92, 0x3f, 0xc8, 0xc2, 0xdc, + 0x50, 0xbb, 0x81, 0x6e, 0x84, 0x72, 0xde, 0xe5, 0x48, 0xce, 0x9b, 0x1f, 0x12, 0x38, 0xb1, 0x94, + 0x17, 0x9f, 0x89, 0xb2, 0x63, 0xcc, 0x44, 0xb9, 0xa4, 0x99, 0x28, 0x7f, 0x78, 0x26, 0x8a, 0x78, + 0xa7, 0x90, 0xc8, 0x3b, 0x1f, 0x49, 0x30, 0x1b, 0x69, 0xa0, 0xd0, 0x35, 0x28, 0xea, 0x86, 0x4d, + 0xd4, 0x81, 0x45, 0xc4, 0xf3, 0x81, 0x57, 0x15, 0x9b, 0x62, 0x1c, 0x7b, 0x1c, 0xa8, 0x06, 0x93, + 0xb6, 0xba, 0x4d, 0x3a, 0x83, 0x2e, 0xe9, 0x30, 0x8f, 0x14, 0xfd, 0xa7, 0xfc, 0x0d, 0x97, 0x80, + 0x7d, 0x1e, 0xd4, 0x00, 0xe0, 0xbd, 0x58, 0xcb, 0xec, 0xb8, 0xe1, 0xe6, 0x7e, 0xff, 0x06, 0x4d, + 0x8f, 0xf2, 0x6c, 0x7f, 0x71, 0xc6, 0xff, 0xc7, 0xfc, 0x1f, 0x90, 0x93, 0xff, 0x9d, 0x83, 0x52, + 0xb0, 0x11, 0x4b, 0xf0, 0x85, 0xc9, 0x3b, 0x30, 0xa5, 0x18, 0x86, 0xe9, 0x28, 0xbc, 0x5b, 0xce, + 0x24, 0xbe, 0x15, 0x0e, 0xea, 0xa9, 0xde, 0xf6, 0x21, 0xf8, 0xad, 0xb0, 0x97, 0x11, 0x02, 0x14, + 0x1c, 0xd4, 0x84, 0x6e, 0x8b, 0x16, 0x39, 0x9b, 0xbc, 0x45, 0x2e, 0x46, 0xda, 0xe3, 0x1a, 0x4c, + 
+	0x7a, 0x9d, 0xa4, 0xf8, 0x78, 0xc9, 0xb3, 0xb2, 0xbf, 0xb5, 0x7d, 0x1e, 0x54, 0x0d, 0x05, 0x43,
+	0x9e, 0x05, 0xc3, 0xcc, 0x21, 0x57, 0x1d, 0xd1, 0xfe, 0xbb, 0x30, 0xee, 0xfe, 0x7b, 0x62, 0x2c,
+	0xfd, 0x77, 0xe5, 0xeb, 0x70, 0x3a, 0xea, 0xc1, 0x54, 0xef, 0xd2, 0xeb, 0x80, 0x86, 0xf5, 0x1f,
+	0xd5, 0xc2, 0x0d, 0x4b, 0xf8, 0xf9, 0xac, 0x7e, 0xef, 0xc9, 0xd3, 0x85, 0x53, 0x9f, 0x3c, 0x5d,
+	0x38, 0xf5, 0xd9, 0xd3, 0x85, 0x53, 0x3f, 0x39, 0x58, 0x90, 0x9e, 0x1c, 0x2c, 0x48, 0x9f, 0x1c,
+	0x2c, 0x48, 0x9f, 0x1d, 0x2c, 0x48, 0x9f, 0x1f, 0x2c, 0x48, 0xbf, 0xf8, 0xc7, 0xc2, 0xa9, 0x87,
+	0x17, 0x8f, 0xfc, 0x06, 0xff, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd0, 0xa8, 0x38, 0xe0, 0xa7,
+	0x2f, 0x00, 0x00,
+}
+
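// [Editor's note; annotation only, not part of the generated file or of this
// patch: the MarshalToSizedBuffer functions below are emitted by
// gogo/protobuf and fill dAtA from the end toward the start, so fields are
// written in descending field-number order. Each length-delimited field is
// prefixed by a key byte computed as (field_number << 3) | wire_type; in
// DockerImageReference, for instance, Registry is field 1 with wire type 2,
// giving (1 << 3) | 2 = 0x0a, while ID is field 5, giving (5 << 3) | 2 = 0x2a.]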
+func (m *DockerImageReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DockerImageReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DockerImageReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ID)
+	copy(dAtA[i:], m.ID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Tag)
+	copy(dAtA[i:], m.Tag)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Registry)
+	copy(dAtA[i:], m.Registry)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Image) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Image) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Image) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.DockerImageManifests) > 0 {
+		for iNdEx := len(m.DockerImageManifests) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.DockerImageManifests[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x5a
+		}
+	}
+	i -= len(m.DockerImageConfig)
+	copy(dAtA[i:], m.DockerImageConfig)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageConfig)))
+	i--
+	dAtA[i] = 0x52
+	i -= len(m.DockerImageManifestMediaType)
+	copy(dAtA[i:], m.DockerImageManifestMediaType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageManifestMediaType)))
+	i--
+	dAtA[i] = 0x4a
+	if len(m.DockerImageSignatures) > 0 {
+		for iNdEx := len(m.DockerImageSignatures) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.DockerImageSignatures[iNdEx])
+			copy(dAtA[i:], m.DockerImageSignatures[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageSignatures[iNdEx])))
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if len(m.Signatures) > 0 {
+		for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if len(m.DockerImageLayers) > 0 {
+		for iNdEx := len(m.DockerImageLayers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.DockerImageLayers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	i -= len(m.DockerImageManifest)
+	copy(dAtA[i:], m.DockerImageManifest)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageManifest)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.DockerImageMetadataVersion)
+	copy(dAtA[i:], m.DockerImageMetadataVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageMetadataVersion)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.DockerImageMetadata.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.DockerImageReference)
+	copy(dAtA[i:], m.DockerImageReference)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageBlobReferences) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageBlobReferences) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageBlobReferences) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Manifests) > 0 {
+		for iNdEx := len(m.Manifests) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Manifests[iNdEx])
+			copy(dAtA[i:], m.Manifests[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manifests[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i--
+	if m.ImageMissing {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	if m.Config != nil {
+		i -= len(*m.Config)
+		copy(dAtA[i:], *m.Config)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Config)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Layers) > 0 {
+		for iNdEx := len(m.Layers) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Layers[iNdEx])
+			copy(dAtA[i:], m.Layers[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Layers[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageImportSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageImportSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	i--
+	if m.IncludeManifest {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	{
+		size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	if m.To != nil {
+		{
+			size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageImportStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageImportStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Manifests) > 0 {
+		for iNdEx := len(m.Manifests) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Manifests[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i -= len(m.Tag)
+	copy(dAtA[i:], m.Tag)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+	i--
+	dAtA[i] = 0x1a
+	if m.Image != nil {
+		{
+			size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageLayer) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageLayer) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageLayer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.MediaType)
+	copy(dAtA[i:], m.MediaType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType)))
+	i--
+	dAtA[i] = 0x1a
+	i = encodeVarintGenerated(dAtA, i, uint64(m.LayerSize))
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageLayerData) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageLayerData) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageLayerData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.MediaType)
+	copy(dAtA[i:], m.MediaType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType)))
+	i--
+	dAtA[i] = 0x12
+	if m.LayerSize != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.LayerSize))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageLookupPolicy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageLookupPolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageLookupPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.Local {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageManifest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageManifest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageManifest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Variant)
+	copy(dAtA[i:], m.Variant)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Variant)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.OS)
+	copy(dAtA[i:], m.OS)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.OS)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Architecture)
+	copy(dAtA[i:], m.Architecture)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture)))
+	i--
+	dAtA[i] = 0x22
+	i = encodeVarintGenerated(dAtA, i, uint64(m.ManifestSize))
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.MediaType)
+	copy(dAtA[i:], m.MediaType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Digest)
+	copy(dAtA[i:], m.Digest)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Digest)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageSignature) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageSignature) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.IssuedTo != nil {
+		{
+			size, err := m.IssuedTo.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.IssuedBy != nil {
+		{
+			size, err := m.IssuedBy.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if m.Created != nil {
+		{
+			size, err := m.Created.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if len(m.SignedClaims) > 0 {
+		keysForSignedClaims := make([]string, 0, len(m.SignedClaims))
+		for k := range m.SignedClaims {
+			keysForSignedClaims = append(keysForSignedClaims, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims)
+		for iNdEx := len(keysForSignedClaims) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.SignedClaims[string(keysForSignedClaims[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForSignedClaims[iNdEx])
+			copy(dAtA[i:], keysForSignedClaims[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSignedClaims[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	i -= len(m.ImageIdentity)
+	copy(dAtA[i:], m.ImageIdentity)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageIdentity)))
+	i--
+	dAtA[i] = 0x2a
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if m.Content != nil {
+		i -= len(m.Content)
+		copy(dAtA[i:], m.Content)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.Content)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
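// [Editor's note; annotation only, not part of the generated file or of this
// patch: map fields such as SignedClaims above are serialized as one nested
// message per entry, with the key as field 1 (key byte 0xa) and the value as
// field 2 (key byte 0x12). The keys are sorted with
// github_com_gogo_protobuf_sortkeys.Strings first so the output is
// deterministic, and uint64(baseI-i) records each entry's byte length before
// the outer field tag is written.]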
+func (m *ImageStream) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStream) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStream) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImage) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamImage) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImport) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamImport) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImport) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImportSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamImportSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Images) > 0 {
+		for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Repository != nil {
+		{
+			size, err := m.Repository.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i--
+	if m.Import {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImportStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamImportStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Images) > 0 {
+		for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Repository != nil {
+		{
+			size, err := m.Repository.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Import != nil {
+		{
+			size, err := m.Import.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamLayers) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamLayers) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamLayers) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Images) > 0 {
+		keysForImages := make([]string, 0, len(m.Images))
+		for k := range m.Images {
+			keysForImages = append(keysForImages, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForImages)
+		for iNdEx := len(keysForImages) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Images[string(keysForImages[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForImages[iNdEx])
+			copy(dAtA[i:], keysForImages[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForImages[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Blobs) > 0 {
+		keysForBlobs := make([]string, 0, len(m.Blobs))
+		for k := range m.Blobs {
+			keysForBlobs = append(keysForBlobs, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs)
+		for iNdEx := len(keysForBlobs) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Blobs[string(keysForBlobs[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForBlobs[iNdEx])
+			copy(dAtA[i:], keysForBlobs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBlobs[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamMapping) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamMapping) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Tag)
+	copy(dAtA[i:], m.Tag)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.LookupPolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Tags) > 0 {
+		for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Tags[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.DockerImageRepository)
+	copy(dAtA[i:], m.DockerImageRepository)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageRepository)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.PublicDockerImageRepository)
+	copy(dAtA[i:], m.PublicDockerImageRepository)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicDockerImageRepository)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Tags) > 0 {
+		for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Tags[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.DockerImageRepository)
+	copy(dAtA[i:], m.DockerImageRepository)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageRepository)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamTag) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamTag) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamTag) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.LookupPolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	{
+		size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
+	i--
+	dAtA[i] = 0x18
+	if m.Tag != nil {
+		{
+			size, err := m.Tag.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamTagList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamTagList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamTagList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageTag) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageTag) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageTag) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Image != nil {
+		{
+			size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Status != nil {
+		{
+			size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Spec != nil {
+		{
+			size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageTagList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageTagList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageTagList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamedTagEventList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamedTagEventList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedTagEventList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Tag)
+	copy(dAtA[i:], m.Tag)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RepositoryImportSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RepositoryImportSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RepositoryImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x22
+	i--
+	if m.IncludeManifest {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	{
+		size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RepositoryImportStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RepositoryImportStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RepositoryImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.AdditionalTags) > 0 {
+		for iNdEx := len(m.AdditionalTags) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.AdditionalTags[iNdEx])
+			copy(dAtA[i:], m.AdditionalTags[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdditionalTags[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Images) > 0 {
+		for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SecretList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecretList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SignatureCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SignatureCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x2a
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SignatureGenericEntity) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SignatureGenericEntity) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureGenericEntity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.CommonName)
+	copy(dAtA[i:], m.CommonName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommonName)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Organization)
+	copy(dAtA[i:], m.Organization)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Organization)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SignatureIssuer) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SignatureIssuer) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureIssuer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.SignatureGenericEntity.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SignatureSubject) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SignatureSubject) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.PublicKeyID)
+	copy(dAtA[i:], m.PublicKeyID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicKeyID)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.SignatureGenericEntity.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TagEvent) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TagEvent) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
+	i--
+	dAtA[i] = 0x20
+	i -= len(m.Image)
+	copy(dAtA[i:], m.Image)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.DockerImageReference)
+	copy(dAtA[i:], m.DockerImageReference)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.Created.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TagEventCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TagEventCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagEventCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
+	i--
+	dAtA[i] = 0x30
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TagImportPolicy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TagImportPolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagImportPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ImportMode)
+	copy(dAtA[i:], m.ImportMode)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImportMode)))
+	i--
+	dAtA[i] = 0x1a
+	i--
+	if m.Scheduled {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i--
+	if m.Insecure {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *TagReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TagReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x3a
+	{
+		size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	if m.Generation != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Generation))
+		i--
+		dAtA[i] = 0x28
+	}
+	i--
+	if m.Reference {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	if m.From != nil {
+		{
+			size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Annotations) > 0 {
+		keysForAnnotations := make([]string, 0, len(m.Annotations))
+		for k := range m.Annotations {
+			keysForAnnotations = append(keysForAnnotations, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+		for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Annotations[string(keysForAnnotations[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAnnotations[iNdEx])
+			copy(dAtA[i:], keysForAnnotations[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TagReferencePolicy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TagReferencePolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagReferencePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
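// [Editor's note; annotation only, not part of the generated file or of this
// patch: encodeVarintGenerated below writes v as a protobuf base-128 varint,
// seven payload bits per byte, least-significant group first, with the high
// bit set on every byte except the last. For example, v = 300 (binary
// 100101100) splits into the groups 0101100 and 0000010 and is encoded as
// 0xac, 0x02. sovGenerated, referenced here and in the Size functions,
// returns that encoded byte length; its definition is not part of this hunk.]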
uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagReferencePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagReferencePolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagReferencePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DockerImageReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Image) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + l = m.DockerImageMetadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageMetadataVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageManifest) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.DockerImageLayers) > 0 { + for _, e := range m.DockerImageLayers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Signatures) > 0 { + for _, e := range m.Signatures { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.DockerImageSignatures) > 0 { + for _, b := range m.DockerImageSignatures { + l = len(b) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.DockerImageManifestMediaType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageConfig) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.DockerImageManifests) > 0 { + for _, e := range m.DockerImageManifests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageBlobReferences) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Layers) > 0 
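+ // Editorial note, not generator output: Size() mirrors the marshal
+ // layout above. Each proto3 string field accounts for 1 key byte, plus
+ // sovGenerated(len) bytes for the length varint, plus the bytes of the
+ // string itself; sovGenerated(x), defined later in this file, is the
+ // base-128 varint width. For example, a 300-byte layer name costs
+ // 1 + 2 + 300 = 303 bytes, since 300 needs a 2-byte varint.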
{ + for _, s := range m.Layers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Config != nil { + l = len(*m.Config) + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Manifests) > 0 { + for _, s := range m.Manifests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageImportSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.To != nil { + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.ImportPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = m.ReferencePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageImportStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Manifests) > 0 { + for _, e := range m.Manifests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageLayer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.LayerSize)) + l = len(m.MediaType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageLayerData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LayerSize != nil { + n += 1 + sovGenerated(uint64(*m.LayerSize)) + } + l = len(m.MediaType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageLookupPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func (m *ImageManifest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MediaType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ManifestSize)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OS) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Variant) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageSignature) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Content != nil { + l = len(m.Content) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ImageIdentity) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.SignedClaims) > 0 { + for k, v := range m.SignedClaims { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Created != nil { + l = m.Created.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IssuedBy != nil { + l = m.IssuedBy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IssuedTo != nil { + l = m.IssuedTo.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageStream) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImport) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImportSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if m.Repository != nil { + l = m.Repository.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamImportStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Import != nil { + l = m.Import.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Repository != nil { + l = m.Repository.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamLayers) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Blobs) > 0 { + for k, v := range m.Blobs { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Images) > 0 { + for k, v := range m.Images { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ImageStreamList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamMapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tags) > 0 { + for _, e := range m.Tags { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.LookupPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tags) > 0 { + for _, e := range m.Tags { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = 
len(m.PublicDockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamTag) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Tag != nil { + l = m.Tag.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Generation)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LookupPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamTagList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageTag) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageTagList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamedTagEventList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RepositoryImportSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ImportPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = m.ReferencePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RepositoryImportStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AdditionalTags) > 0 { + for _, s := range m.AdditionalTags { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SignatureCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + +func (m *SignatureGenericEntity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Organization) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommonName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SignatureIssuer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SignatureGenericEntity.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SignatureSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SignatureGenericEntity.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PublicKeyID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TagEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Created.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + return n +} + +func (m *TagEventCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + return n +} + +func (m *TagImportPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + l = len(m.ImportMode) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TagReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.Generation != nil { + n += 1 + sovGenerated(uint64(*m.Generation)) + } + l = m.ImportPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ReferencePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TagReferencePolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DockerImageReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DockerImageReference{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *Image) String() string { + if this == nil { + return "nil" + } + repeatedStringForDockerImageLayers := "[]ImageLayer{" + for _, f := range this.DockerImageLayers { + repeatedStringForDockerImageLayers += strings.Replace(strings.Replace(f.String(), "ImageLayer", "ImageLayer", 1), `&`, ``, 1) + 
"," + } + repeatedStringForDockerImageLayers += "}" + repeatedStringForSignatures := "[]ImageSignature{" + for _, f := range this.Signatures { + repeatedStringForSignatures += strings.Replace(strings.Replace(f.String(), "ImageSignature", "ImageSignature", 1), `&`, ``, 1) + "," + } + repeatedStringForSignatures += "}" + repeatedStringForDockerImageManifests := "[]ImageManifest{" + for _, f := range this.DockerImageManifests { + repeatedStringForDockerImageManifests += strings.Replace(strings.Replace(f.String(), "ImageManifest", "ImageManifest", 1), `&`, ``, 1) + "," + } + repeatedStringForDockerImageManifests += "}" + s := strings.Join([]string{`&Image{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`, + `DockerImageMetadata:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DockerImageMetadata), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `DockerImageMetadataVersion:` + fmt.Sprintf("%v", this.DockerImageMetadataVersion) + `,`, + `DockerImageManifest:` + fmt.Sprintf("%v", this.DockerImageManifest) + `,`, + `DockerImageLayers:` + repeatedStringForDockerImageLayers + `,`, + `Signatures:` + repeatedStringForSignatures + `,`, + `DockerImageSignatures:` + fmt.Sprintf("%v", this.DockerImageSignatures) + `,`, + `DockerImageManifestMediaType:` + fmt.Sprintf("%v", this.DockerImageManifestMediaType) + `,`, + `DockerImageConfig:` + fmt.Sprintf("%v", this.DockerImageConfig) + `,`, + `DockerImageManifests:` + repeatedStringForDockerImageManifests + `,`, + `}`, + }, "") + return s +} +func (this *ImageBlobReferences) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageBlobReferences{`, + `Layers:` + fmt.Sprintf("%v", this.Layers) + `,`, + `Config:` + valueToStringGenerated(this.Config) + `,`, + `ImageMissing:` + fmt.Sprintf("%v", this.ImageMissing) + `,`, + `Manifests:` + fmt.Sprintf("%v", this.Manifests) + `,`, + `}`, + }, "") + return s +} +func (this *ImageImportSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageImportSpec{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageImportStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForManifests := "[]Image{" + for _, f := range this.Manifests { + repeatedStringForManifests += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + "," + } + repeatedStringForManifests += "}" + s := strings.Join([]string{`&ImageImportStatus{`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `Manifests:` + repeatedStringForManifests + 
`,`, + `}`, + }, "") + return s +} +func (this *ImageLayer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLayer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `LayerSize:` + fmt.Sprintf("%v", this.LayerSize) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `}`, + }, "") + return s +} +func (this *ImageLayerData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLayerData{`, + `LayerSize:` + valueToStringGenerated(this.LayerSize) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `}`, + }, "") + return s +} +func (this *ImageList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Image{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageLookupPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLookupPolicy{`, + `Local:` + fmt.Sprintf("%v", this.Local) + `,`, + `}`, + }, "") + return s +} +func (this *ImageManifest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageManifest{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `ManifestSize:` + fmt.Sprintf("%v", this.ManifestSize) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `OS:` + fmt.Sprintf("%v", this.OS) + `,`, + `Variant:` + fmt.Sprintf("%v", this.Variant) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSignature) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]SignatureCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "SignatureCondition", "SignatureCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + keysForSignedClaims := make([]string, 0, len(this.SignedClaims)) + for k := range this.SignedClaims { + keysForSignedClaims = append(keysForSignedClaims, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims) + mapStringForSignedClaims := "map[string]string{" + for _, k := range keysForSignedClaims { + mapStringForSignedClaims += fmt.Sprintf("%v: %v,", k, this.SignedClaims[k]) + } + mapStringForSignedClaims += "}" + s := strings.Join([]string{`&ImageSignature{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Content:` + valueToStringGenerated(this.Content) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ImageIdentity:` + fmt.Sprintf("%v", this.ImageIdentity) + `,`, + `SignedClaims:` + mapStringForSignedClaims + `,`, + `Created:` + strings.Replace(fmt.Sprintf("%v", this.Created), "Time", "v1.Time", 1) + `,`, + `IssuedBy:` + strings.Replace(this.IssuedBy.String(), "SignatureIssuer", "SignatureIssuer", 1) + `,`, + `IssuedTo:` + strings.Replace(this.IssuedTo.String(), "SignatureSubject", "SignatureSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStream) String() string { + 
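+ // Editorial note, not generator output: these String() methods build a
+ // human-readable debug dump with strings.Join. The seemingly redundant
+ // strings.Replace(f.String(), "X", "X", 1) calls appear to come from
+ // the gogo stringer template, which rewrites type names to
+ // package-qualified ones; when the local and target names match, the
+ // call is a deliberate no-op.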
if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStream{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamSpec", "ImageStreamSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamStatus", "ImageStreamStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamImage{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImport) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamImport{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamImportSpec", "ImageStreamImportSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamImportStatus", "ImageStreamImportStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImportSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportSpec{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportSpec", "ImageImportSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&ImageStreamImportSpec{`, + `Import:` + fmt.Sprintf("%v", this.Import) + `,`, + `Repository:` + strings.Replace(this.Repository.String(), "RepositoryImportSpec", "RepositoryImportSpec", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImportStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportStatus{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&ImageStreamImportStatus{`, + `Import:` + strings.Replace(this.Import.String(), "ImageStream", "ImageStream", 1) + `,`, + `Repository:` + strings.Replace(this.Repository.String(), "RepositoryImportStatus", "RepositoryImportStatus", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamLayers) String() string { + if this == nil { + return "nil" + } + keysForBlobs := make([]string, 0, len(this.Blobs)) + for k := range this.Blobs { + keysForBlobs = append(keysForBlobs, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs) + mapStringForBlobs := "map[string]ImageLayerData{" + for _, k := range keysForBlobs { + mapStringForBlobs += fmt.Sprintf("%v: %v,", k, this.Blobs[k]) + } + mapStringForBlobs += "}" + keysForImages := make([]string, 0, len(this.Images)) + for k := range this.Images { + keysForImages = append(keysForImages, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForImages) + mapStringForImages := 
"map[string]ImageBlobReferences{" + for _, k := range keysForImages { + mapStringForImages += fmt.Sprintf("%v: %v,", k, this.Images[k]) + } + mapStringForImages += "}" + s := strings.Join([]string{`&ImageStreamLayers{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Blobs:` + mapStringForBlobs + `,`, + `Images:` + mapStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ImageStream{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageStream", "ImageStream", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageStreamList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamMapping{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTags := "[]TagReference{" + for _, f := range this.Tags { + repeatedStringForTags += strings.Replace(strings.Replace(f.String(), "TagReference", "TagReference", 1), `&`, ``, 1) + "," + } + repeatedStringForTags += "}" + s := strings.Join([]string{`&ImageStreamSpec{`, + `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`, + `Tags:` + repeatedStringForTags + `,`, + `LookupPolicy:` + strings.Replace(strings.Replace(this.LookupPolicy.String(), "ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForTags := "[]NamedTagEventList{" + for _, f := range this.Tags { + repeatedStringForTags += strings.Replace(strings.Replace(f.String(), "NamedTagEventList", "NamedTagEventList", 1), `&`, ``, 1) + "," + } + repeatedStringForTags += "}" + s := strings.Join([]string{`&ImageStreamStatus{`, + `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`, + `Tags:` + repeatedStringForTags + `,`, + `PublicDockerImageRepository:` + fmt.Sprintf("%v", this.PublicDockerImageRepository) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamTag) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]TagEventCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ImageStreamTag{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Tag:` + strings.Replace(this.Tag.String(), "TagReference", "TagReference", 1) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `Conditions:` + 
repeatedStringForConditions + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `LookupPolicy:` + strings.Replace(strings.Replace(this.LookupPolicy.String(), "ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamTagList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ImageStreamTag{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageStreamTag", "ImageStreamTag", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageStreamTagList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageTag) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageTag{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(this.Spec.String(), "TagReference", "TagReference", 1) + `,`, + `Status:` + strings.Replace(this.Status.String(), "NamedTagEventList", "NamedTagEventList", 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageTagList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ImageTag{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageTag", "ImageTag", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageTagList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamedTagEventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]TagEvent{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "TagEvent", "TagEvent", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + repeatedStringForConditions := "[]TagEventCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&NamedTagEventList{`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *RepositoryImportSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RepositoryImportSpec{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*RepositoryImportStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportStatus{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&RepositoryImportStatus{`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `AdditionalTags:` + fmt.Sprintf("%v", this.AdditionalTags) + `,`, + `}`, + }, "") + return s +} +func (this *SecretList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Secret{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SignatureCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureGenericEntity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureGenericEntity{`, + `Organization:` + fmt.Sprintf("%v", this.Organization) + `,`, + `CommonName:` + fmt.Sprintf("%v", this.CommonName) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureIssuer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureIssuer{`, + `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), "SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureSubject{`, + `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), "SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`, + `PublicKeyID:` + fmt.Sprintf("%v", this.PublicKeyID) + `,`, + `}`, + }, "") + return s +} +func (this *TagEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagEvent{`, + `Created:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Created), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `}`, + }, "") + return s +} +func (this *TagEventCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagEventCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", 
this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `}`, + }, "") + return s +} +func (this *TagImportPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagImportPolicy{`, + `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, + `Scheduled:` + fmt.Sprintf("%v", this.Scheduled) + `,`, + `ImportMode:` + fmt.Sprintf("%v", this.ImportMode) + `,`, + `}`, + }, "") + return s +} +func (this *TagReference) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&TagReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `Generation:` + valueToStringGenerated(this.Generation) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TagReferencePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagReferencePolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DockerImageReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DockerImageReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DockerImageReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Registry = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Image: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DockerImageMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadataVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageMetadataVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageManifest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageLayers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageLayers = append(m.DockerImageLayers, ImageLayer{}) + if err := m.DockerImageLayers[len(m.DockerImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signatures = append(m.Signatures, ImageSignature{}) + if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageSignatures", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageSignatures = append(m.DockerImageSignatures, make([]byte, postIndex-iNdEx)) + copy(m.DockerImageSignatures[len(m.DockerImageSignatures)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifestMediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageManifestMediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
DockerImageConfig", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageConfig = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageManifests = append(m.DockerImageManifests, ImageManifest{}) + if err := m.DockerImageManifests[len(m.DockerImageManifests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageBlobReferences) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageBlobReferences: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageBlobReferences: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Layers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Layers = append(m.Layers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Config = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageMissing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ImageMissing = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manifests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manifests = append(m.Manifests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.To == nil { + m.To = &v11.LocalObjectReference{} + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeManifest = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageImportStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageImportStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manifests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manifests = append(m.Manifests, Image{}) + if err := m.Manifests[len(m.Manifests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLayer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLayer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLayer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) 
+ } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType) + } + m.LayerSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LayerSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLayerData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLayerData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLayerData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LayerSize = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Image{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLookupPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLookupPolicy: wiretype end group for non-group") + } + if fieldNum <= 
0 { + return fmt.Errorf("proto: ImageLookupPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Local", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Local = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageManifest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageManifest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageManifest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManifestSize", wireType) + } + m.ManifestSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ManifestSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OS = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Variant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSignature) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageSignature: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSignature: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Content = append(m.Content[:0], dAtA[iNdEx:postIndex]...) + if m.Content == nil { + m.Content = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, SignatureCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageIdentity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageIdentity = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignedClaims == nil { + m.SignedClaims = make(map[string]string) + } + var mapkey string + var mapvalue string 
+ for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.SignedClaims[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Created == nil { + m.Created = &v1.Time{} + } + if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IssuedBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IssuedBy == nil { + m.IssuedBy = &SignatureIssuer{} + } + if err := m.IssuedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + 
} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IssuedTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IssuedTo == nil { + m.IssuedTo = &SignatureSubject{} + } + if err := m.IssuedTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStream) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImport) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImport: wiretype end group 
for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImport: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Import = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Repository == nil { + m.Repository = &RepositoryImportSpec{} + } + if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageImportSpec{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImportStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImportStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Import == nil { + m.Import = &ImageStream{} + } + if err := m.Import.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Repository == nil { + m.Repository = &RepositoryImportStatus{} + } + if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageImportStatus{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamLayers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamLayers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamLayers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blobs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Blobs == nil { + m.Blobs = make(map[string]ImageLayerData) + } + var mapkey string + mapvalue := &ImageLayerData{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ImageLayerData{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Blobs[mapkey] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Images == nil { + m.Images = make(map[string]ImageBlobReferences) + } + var mapkey string + mapvalue := &ImageBlobReferences{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ImageBlobReferences{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Images[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ImageStream{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamMapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageRepository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, TagReference{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageRepository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, NamedTagEventList{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicDockerImageRepository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PublicDockerImageRepository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamTag) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamTag: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamTag: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tag == nil { + m.Tag = &TagReference{} + } + if err := m.Tag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, TagEventCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamTagList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamTagList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamTagList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ImageStreamTag{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageTag) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageTag: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageTag: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &TagReference{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &NamedTagEventList{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageTagList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageTagList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageTagList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ImageTag{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedTagEventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedTagEventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedTagEventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, TagEvent{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, TagEventCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RepositoryImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RepositoryImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RepositoryImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeManifest = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RepositoryImportStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RepositoryImportStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RepositoryImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageImportStatus{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalTags = append(m.AdditionalTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, v11.Secret{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SignatureConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureGenericEntity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureGenericEntity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureGenericEntity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Organization", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Organization = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureIssuer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureIssuer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureIssuer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureSubject: wiretype end group for non-group") 
+ } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicKeyID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PublicKeyID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagEventCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagEventCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagEventCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = TagEventConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagImportPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagImportPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagImportPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Insecure = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Scheduled = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImportMode = ImportModeType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &v11.ObjectReference{} + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Reference = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Generation = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagReferencePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagReferencePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagReferencePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = TagReferencePolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/generated.proto b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/generated.proto new file mode 100644 index 000000000..74d8d66c4 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/generated.proto @@ -0,0 +1,752 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package git.colasdn.top.openshift.api.image.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/image/v1"; + +// DockerImageReference points to a container image. 
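+// It is decomposed into registry, namespace, name, tag, and ID components.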
+message DockerImageReference { + // Registry is the registry that contains the container image + optional string registry = 1; + + // Namespace is the namespace that contains the container image + optional string namespace = 2; + + // Name is the name of the container image + optional string name = 3; + + // Tag is the tag of the container image being referenced + optional string tag = 4; + + // ID is the identifier for the container image + optional string iD = 5; +} + +// Image is an immutable representation of a container image and its metadata at a point in time. +// Images are named by taking a hash of their contents (metadata and content) and any change +// in format, content, or metadata results in a new name. The images resource is primarily +// for use by cluster administrators and integrations like the cluster image registry - end +// users, instead, access images via the imagestreamtags or imagestreamimages resources. While +// image metadata is stored in the API, any integration that implements the container image +// registry API must provide its own storage for the raw manifest data, image config, and +// layer contents. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Image { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // dockerImageReference is the string that can be used to pull this image. + optional string dockerImageReference = 2; + + // dockerImageMetadata contains metadata about this image + // +patchStrategy=replace + // +kubebuilder:pruning:PreserveUnknownFields + optional .k8s.io.apimachinery.pkg.runtime.RawExtension dockerImageMetadata = 3; + + // dockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" + optional string dockerImageMetadataVersion = 4; + + // dockerImageManifest is the raw JSON of the manifest + optional string dockerImageManifest = 5; + + // dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. + repeated ImageLayer dockerImageLayers = 6; + + // signatures holds all signatures of the image. + // +patchMergeKey=name + // +patchStrategy=merge + repeated ImageSignature signatures = 7; + + // dockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + repeated bytes dockerImageSignatures = 8; + + // dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + optional string dockerImageManifestMediaType = 9; + + // dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + // Will not be set when the image represents a manifest list. + optional string dockerImageConfig = 10; + + // dockerImageManifests holds information about sub-manifests when the image represents a manifest list. + // When this field is present, no DockerImageLayers should be specified. + repeated ImageManifest dockerImageManifests = 11; +} + +// ImageBlobReferences describes the blob references within an image.
+message ImageBlobReferences { + // imageMissing is true if the image is referenced by the image stream but the image + // object has been deleted from the API by an administrator. When this field is set, + // layers and config fields may be empty and callers that depend on the image metadata + // should consider the image to be unavailable for download or viewing. + // +optional + optional bool imageMissing = 3; + + // layers is the list of blobs that compose this image, from base layer to top layer. + // All layers referenced by this array will be defined in the blobs map. Some images + // may have zero layers. + // +optional + repeated string layers = 1; + + // config, if set, is the blob that contains the image config. Some images do + // not have separate config blobs and this field will be set to nil if so. + // +optional + optional string config = 2; + + // manifests is the list of other image names that this image points + // to. For a single architecture image, it is empty. For a multi-arch + // image, it consists of the digests of single architecture images, + // such images should have neither layers nor config. + // +optional + repeated string manifests = 4; +} + +// ImageImportSpec describes a request to import a specific image. +message ImageImportSpec { + // from is the source of an image to import; only kind DockerImage is allowed + optional .k8s.io.api.core.v1.ObjectReference from = 1; + + // to is a tag in the current image stream to assign the imported image to; if name is not specified, the default tag from from.name will be used + optional .k8s.io.api.core.v1.LocalObjectReference to = 2; + + // importPolicy is the policy controlling how the image is imported + optional TagImportPolicy importPolicy = 3; + + // referencePolicy defines how other components should consume the image + optional TagReferencePolicy referencePolicy = 5; + + // includeManifest determines if the manifest for each image is returned in the response + optional bool includeManifest = 4; +} + +// ImageImportStatus describes the result of an image import. +message ImageImportStatus { + // status is the status of the image import, including errors encountered while retrieving the image + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; + + // image is the metadata of that image, if the image was located + optional Image image = 2; + + // tag is the tag this image was located under, if any + optional string tag = 3; + + // manifests holds sub-manifests metadata when importing a manifest list + repeated Image manifests = 4; +} + +// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. +message ImageLayer { + // name of the layer as defined by the underlying store. + optional string name = 1; + + // size of the layer in bytes as defined by the underlying store. + optional int64 size = 2; + + // mediaType of the referenced object. + optional string mediaType = 3; +} + +// ImageLayerData contains metadata about an image layer. +message ImageLayerData { + // size of the layer in bytes as defined by the underlying store. This field is + // optional if the necessary information about size is not available. + optional int64 size = 1; + + // mediaType of the referenced object. + optional string mediaType = 2; +} + +// ImageList is a list of Image objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1 +message ImageList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of images + repeated Image items = 2; +} + +// ImageLookupPolicy describes how an image stream can be used to override the image references +// used by pods, builds, and other resources in a namespace. +message ImageLookupPolicy { + // local will change the docker short image references (like "mysql" or + // "php:latest") on objects in this namespace to the image ID whenever they match + // this image stream, instead of reaching out to a remote registry. The name will + // be fully qualified to an image ID if found. The tag's referencePolicy is taken + // into account on the replaced value. Only works within the current namespace. + optional bool local = 3; +} + +// ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular +// Image object. +message ImageManifest { + // digest is the unique identifier for the manifest. It refers to an Image object. + optional string digest = 1; + + // mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, + // application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json. + optional string mediaType = 2; + + // manifestSize represents the size of the raw object contents, in bytes. + optional int64 manifestSize = 3; + + // architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`. + optional string architecture = 4; + + // os specifies the operating system, for example `linux`. + optional string os = 5; + + // variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU + // variant of the ARM CPU. + optional string variant = 6; +} + +// ImageSignature holds a signature of an image. It allows one to verify image identity and possibly other claims +// as long as the signature is trusted. Based on this information it is possible to restrict runnable images +// to those matching cluster-wide policy. +// Mandatory fields should be parsed by clients doing image verification. The others are parsed from +// signature's content by the server. They serve an informative purpose only. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageSignature { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Required: Describes a type of stored blob. + optional string type = 2; + + // Required: An opaque binary string which is an image's signature. + optional bytes content = 3; + + // conditions represent the latest available observations of a signature's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated SignatureCondition conditions = 4; + + // A human readable string representing the image's identity. It could be a product name and version, or an + // image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2"). + optional string imageIdentity = 5; + + // Contains claims from the signature.
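+ // Keys are claim names and values are the corresponding claim values, as parsed by the server.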
+ map<string, string> signedClaims = 6; + + // If specified, it is the time of signature's creation. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 7; + + // If specified, it holds information about an issuer of signing certificate or key (a person or entity + // who signed the signing certificate or key). + optional SignatureIssuer issuedBy = 8; + + // If specified, it holds information about a subject of signing certificate or key (a person or entity + // who signed the image). + optional SignatureSubject issuedTo = 9; +} + +// An ImageStream stores a mapping of tags to images, metadata overrides that are applied +// when images are tagged in a stream, and an optional reference to a container image +// repository on a registry. Users typically update the spec.tags field to point to external +// images which are imported from container registries using credentials in your namespace +// with the pull secret type, or to existing image stream tags and images which are +// immediately accessible for tagging or pulling. The history of images applied to a tag +// is visible in the status.tags field and any user who can view an image stream is allowed +// to tag that image into their own image streams. Access to pull images from the integrated +// registry is granted by having the "get imagestreams/layers" permission on a given image +// stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both +// spec and status for that tag to be removed. Image stream history is retained until an +// administrator runs the prune operation, which removes references that are no longer in +// use. To preserve a historical image, ensure there is a tag in spec pointing to that image +// by its digest. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStream { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes the desired state of this stream + // +optional + optional ImageStreamSpec spec = 2; + + // status describes the current state of this stream + // +optional + optional ImageStreamStatus status = 3; +} + +// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. +// User interfaces and regular users can use this resource to access the metadata details of +// a tagged image in the image stream history for viewing, since Image resources are not +// directly accessible to end users. A not found error will be returned if no such image is +// referenced by a tag within the ImageStream. Images are created when spec tags are set on +// an image stream that represent an image in an external registry, when pushing to the +// integrated registry, or when tagging an existing image from one image stream to another. +// The name of an image stream image is in the form "<stream>@<digest>", where the digest is +// the content addressable identifier for the image (sha256:xxxxx...). You can use +// ImageStreamImages as the from.kind of an image stream spec tag to reference an image +// exactly. The only operation supported on the imagestreamimage endpoint is retrieving +// the image. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1 +message ImageStreamImage { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // image associated with the ImageStream and image name. + optional Image image = 2; +} + +// The image stream import resource provides an easy way for a user to find and import container images +// from other container image registries into the server. Individual images or an entire image repository may +// be imported, and users may choose to see the results of the import prior to tagging the resulting +// images into the specified image stream. +// +// This API is intended for end-user tools that need to see the metadata of the image prior to import +// (for instance, to generate an application from it). Clients that know the desired image can continue +// to create spec.tags directly into their image streams. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamImport { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is a description of the images that the user wishes to import + optional ImageStreamImportSpec spec = 2; + + // status is the result of importing the image + optional ImageStreamImportStatus status = 3; +} + +// ImageStreamImportSpec defines what images should be imported. +message ImageStreamImportSpec { + // import indicates whether to perform an import - if so, the specified tags are set on the spec + // and status of the image stream defined by the type meta. + optional bool import = 1; + + // repository is an optional import of an entire container image repository. A maximum limit on the + // number of tags imported this way is imposed by the server. + optional RepositoryImportSpec repository = 2; + + // images are a list of individual images to import. + repeated ImageImportSpec images = 3; +} + +// ImageStreamImportStatus contains information about the status of an image stream import. +message ImageStreamImportStatus { + // import is the image stream that was successfully updated or created when 'to' was set. + // +optional + optional ImageStream import = 1; + + // repository is set if spec.repository was set to the outcome of the import + // +optional + optional RepositoryImportStatus repository = 2; + + // images is set with the result of importing spec.images + // +optional + repeated ImageImportStatus images = 3; +} + +// ImageStreamLayers describes information about the layers referenced by images in this +// image stream. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamLayers { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // blobs is a map of blob name to metadata about the blob. 
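+ // Blob names are content digests (for example sha256:...).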
+ map<string, ImageLayerData> blobs = 2; + + // images is a map between an image name and the names of the blobs and config that + // comprise the image. + map<string, ImageBlobReferences> images = 3; +} + +// ImageStreamList is a list of ImageStream objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of imageStreams + repeated ImageStream items = 2; +} + +// ImageStreamMapping represents a mapping from a single image stream tag to a container +// image as well as the reference to the container image stream the image came from. This +// resource is used by privileged integrators to create an image resource and to associate +// it with an image stream in the status tags field. Creating an ImageStreamMapping will +// allow any user who can view the image stream to tag or pull that image, so only create +// mappings where the user has proven they have access to the image contents directly. +// The only operation supported for this resource is create and the metadata name and +// namespace should be set to the image stream containing the tag that should be updated. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamMapping { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // image is a container image. + optional Image image = 2; + + // tag is a string value this image can be located with inside the stream. + optional string tag = 3; +} + +// ImageStreamSpec represents options for ImageStreams. +message ImageStreamSpec { + // lookupPolicy controls how other resources reference images within this namespace. + optional ImageLookupPolicy lookupPolicy = 3; + + // dockerImageRepository is optional, if specified this stream is backed by a container repository on this server + // Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. + // Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead. + optional string dockerImageRepository = 1; + + // tags map arbitrary string values to specific image locators + // +patchMergeKey=name + // +patchStrategy=merge + repeated TagReference tags = 2; +} + +// ImageStreamStatus contains information about the state of this image stream. +message ImageStreamStatus { + // dockerImageRepository represents the effective location this stream may be accessed at. + // May be empty until the server determines where the repository is located + // +optional + optional string dockerImageRepository = 1; + + // publicDockerImageRepository represents the public location from where the image can + // be pulled outside the cluster. This field may be empty if the administrator + // has not exposed the integrated registry externally. + // +optional + optional string publicDockerImageRepository = 3; + + // tags are a historical record of images associated with each tag.
The first entry in the + // TagEvent array is the currently tagged image. + // +patchMergeKey=tag + // +patchStrategy=merge + // +optional + repeated NamedTagEventList tags = 2; +} + +// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. +// Use this resource to interact with the tags and images in an image stream by tag, or +// to see the image details for a particular tag. The image associated with this resource +// is the most recently successfully tagged, imported, or pushed image (as described in the +// image stream status.tags.items list for this tag). If an import is in progress or has +// failed the previous image will be shown. Deleting an image stream tag clears both the +// status and spec fields of an image stream. If no image can be retrieved for a given tag, +// a not found error will be returned. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamTag { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // tag is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + optional TagReference tag = 2; + + // generation is the current generation of the tagged image - if tag is provided + // and this value is not equal to the tag generation, a user has requested an + // import that has not completed, or conditions will be filled out indicating any + // error. + optional int64 generation = 3; + + // lookupPolicy indicates whether this tag will handle image references in this + // namespace. + optional ImageLookupPolicy lookupPolicy = 6; + + // conditions is an array of conditions that apply to the image stream tag. + repeated TagEventCondition conditions = 4; + + // image associated with the ImageStream and tag. + optional Image image = 5; +} + +// ImageStreamTagList is a list of ImageStreamTag objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamTagList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of image stream tags + repeated ImageStreamTag items = 2; +} + +// ImageTag represents a single tag within an image stream and includes the spec, +// the status history, and the currently referenced image (if any) of the provided +// tag. This type replaces the ImageStreamTag by providing a full view of the tag. +// ImageTags are returned for every spec or status tag present on the image stream. +// If no tag exists in either form, a not found error will be returned by the API. +// A create operation will succeed if no spec tag has already been defined and the +// spec field is set. Delete will remove both spec and status elements from the +// image stream. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageTag { + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + optional TagReference spec = 2; + + // status is the status tag details associated with this image stream tag, and it + // may be null if no push or import has been performed. + optional NamedTagEventList status = 3; + + // image is the details of the most recent image stream status tag, and it may be + // null if import has not completed or an administrator has deleted the image + // object. To verify this is the most recent image, you must verify the generation + // of the most recent status.items entry matches the spec tag (if a spec tag is + // set). This field will not be set when listing image tags. + optional Image image = 4; +} + +// ImageTagList is a list of ImageTag objects. When listing image tags, the image +// field is not populated. Tags are returned in alphabetical order by image stream +// and then tag. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageTagList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of image stream tags + repeated ImageTag items = 2; +} + +// NamedTagEventList relates a tag to its image history. +message NamedTagEventList { + // tag is the tag for which the history is recorded + optional string tag = 1; + + // items is the historical record of images associated with this tag; the first entry is the currently tagged image. + repeated TagEvent items = 2; + + // conditions is an array of conditions that apply to the tag event list. + repeated TagEventCondition conditions = 3; +} + +// RepositoryImportSpec describes a request to import images from a container image repository. +message RepositoryImportSpec { + // from is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed + optional .k8s.io.api.core.v1.ObjectReference from = 1; + + // importPolicy is the policy controlling how the image is imported + optional TagImportPolicy importPolicy = 2; + + // referencePolicy defines how other components should consume the image + optional TagReferencePolicy referencePolicy = 4; + + // includeManifest determines if the manifest for each image is returned in the response + optional bool includeManifest = 3; +} + +// RepositoryImportStatus describes the result of an image repository import. +message RepositoryImportStatus { + // status reflects whether any failure occurred during import + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; + + // images is a list of images successfully retrieved by the import of the repository. + repeated ImageImportStatus images = 2; + + // additionalTags are tags that exist in the repository but were not imported because + // a maximum limit of automatic imports was applied. + repeated string additionalTags = 3; +} + +// SecretList is a list of Secret. +// +openshift:compatibility-gen:level=1 +message SecretList { + // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of secret objects. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + repeated .k8s.io.api.core.v1.Secret items = 2; +} + +// SignatureCondition describes an image signature condition of a particular kind at a particular probe time. +message SignatureCondition { + // type of signature condition, Complete or Failed. + optional string type = 1; + + // status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition was checked. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + optional string reason = 5; + + // Human readable message indicating details about last transition. + optional string message = 6; +} + +// SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject +// of signing certificate or key. +message SignatureGenericEntity { + // organization name. + optional string organization = 1; + + // Common name (e.g. openshift-signing-service). + optional string commonName = 2; +} + +// SignatureIssuer holds information about an issuer of signing certificate or key. +message SignatureIssuer { + optional SignatureGenericEntity signatureGenericEntity = 1; +} + +// SignatureSubject holds information about a person or entity who created the signature. +message SignatureSubject { + optional SignatureGenericEntity signatureGenericEntity = 1; + + // If present, it is a human readable key id of public key belonging to the subject used to verify image + // signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g. + // 0x685ebe62bf278440). + optional string publicKeyID = 2; +} + +// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag. +message TagEvent { + // created holds the time the TagEvent was created + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 1; + + // dockerImageReference is the string that can be used to pull this image + optional string dockerImageReference = 2; + + // image is the image + optional string image = 3; + + // generation is the spec tag generation that resulted in this tag being updated + optional int64 generation = 4; +} + +// TagEventCondition contains condition information for a tag event. +message TagEventCondition { + // type of tag event condition, currently only ImportSuccess + optional string type = 1; + + // status of the condition, one of True, False, Unknown. + optional string status = 2; + + // lastTransitionTime is the time the condition transitioned from one status to another. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is a brief machine readable explanation for the condition's last transition. + optional string reason = 4; + + // message is a human readable description of the details about last transition, complementing reason. + optional string message = 5; + + // generation is the spec tag generation that this status corresponds to + optional int64 generation = 6; +} + +// TagImportPolicy controls how images related to this tag will be imported.
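+// The importMode values accepted by the server are those defined by ImportModeType in types.go: Legacy (the default) and PreserveOriginal.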
+message TagImportPolicy { + // insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import. + optional bool insecure = 1; + + // scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported + optional bool scheduled = 2; + + // importMode describes how to import an image manifest. + optional string importMode = 3; +} + +// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. +message TagReference { + // name of the tag + optional string name = 1; + + // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags. + // +optional + map<string, string> annotations = 2; + + // Optional; if specified, a reference to another image that this tag should point to. Valid values + // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references + // can only reference a tag within this same ImageStream. + optional .k8s.io.api.core.v1.ObjectReference from = 3; + + // reference states if the tag will be imported. Default value is false, which means the tag will + // be imported. + optional bool reference = 4; + + // generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference + // is changed the generation is set to match the current stream generation (which is incremented every + // time spec is changed). Other processes in the system like the image importer observe that the + // generation of spec tag is newer than the generation recorded in the status and use that as a trigger + // to import the newest remote tag. To trigger a new import, clients may set this value to zero which + // will reset the generation to the latest stream generation. Legacy clients will send this value as + // nil which will be merged with the current tag generation. + // +optional + optional int64 generation = 5; + + // importPolicy is information that controls how images may be imported by the server. + optional TagImportPolicy importPolicy = 6; + + // referencePolicy defines how other components should consume the image. + optional TagReferencePolicy referencePolicy = 7; +} + +// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when +// image change triggers in deployment configs or builds are resolved. This allows the image stream +// author to control how images are accessed. +message TagReferencePolicy { + // type determines how the image pull spec should be transformed when the image stream tag is used in + // deployment config triggers or new builds. The default value is `Source`, indicating the original + // location of the image should be used (if imported). The user may also specify `Local`, indicating + // that the pull spec should point to the integrated container image registry and leverage the registry's + // ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this + // image to be managed from the image stream's namespace, so others on the platform can access a remote + // image but have no access to the remote secret. It also allows the image layers to be mirrored into + // the local registry, so that the images can still be pulled even if the upstream registry is unavailable.
+ optional string type = 1; +} + diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/legacy.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/legacy.go new file mode 100644 index 000000000..02bbaa290 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/legacy.go @@ -0,0 +1,33 @@ +package v1 + +import ( + "github.com/openshift/api/image/docker10" + "github.com/openshift/api/image/dockerpre012" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, docker10.AddToSchemeInCoreGroup, dockerpre012.AddToSchemeInCoreGroup, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Image{}, + &ImageList{}, + &ImageSignature{}, + &ImageStream{}, + &ImageStreamList{}, + &ImageStreamMapping{}, + &ImageStreamTag{}, + &ImageStreamTagList{}, + &ImageStreamImage{}, + &ImageStreamImport{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/register.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/register.go new file mode 100644 index 000000000..0d924103a --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/register.go @@ -0,0 +1,54 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/openshift/api/image/docker10" + "github.com/openshift/api/image/dockerpre012" +) + +var ( + GroupName = "image.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, docker10.AddToScheme, dockerpre012.AddToScheme, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
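+// addKnownTypes is wired into schemeBuilder above, so callers normally reach it through Install. A minimal usage sketch (illustrative only): +// +// scheme := runtime.NewScheme() +// if err := Install(scheme); err != nil { +// panic(err) +// }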
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Image{}, + &ImageList{}, + &ImageSignature{}, + &ImageStream{}, + &ImageStreamList{}, + &ImageStreamMapping{}, + &ImageStreamTag{}, + &ImageStreamTagList{}, + &ImageStreamImage{}, + &ImageStreamLayers{}, + &ImageStreamImport{}, + &ImageTag{}, + &ImageTagList{}, + &SecretList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/types.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/types.go new file mode 100644 index 000000000..e45d6b10d --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/types.go @@ -0,0 +1,772 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageList is a list of Image objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of images + Items []Image `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Image is an immutable representation of a container image and its metadata at a point in time. +// Images are named by taking a hash of their contents (metadata and content) and any change +// in format, content, or metadata results in a new name. The images resource is primarily +// for use by cluster administrators and integrations like the cluster image registry - end +// users, instead, access images via the imagestreamtags or imagestreamimages resources. While +// image metadata is stored in the API, any integration that implements the container image +// registry API must provide its own storage for the raw manifest data, image config, and +// layer contents. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Image struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // dockerImageReference is the string that can be used to pull this image. 
+ DockerImageReference string `json:"dockerImageReference,omitempty" protobuf:"bytes,2,opt,name=dockerImageReference"` + // dockerImageMetadata contains metadata about this image + // +patchStrategy=replace + // +kubebuilder:pruning:PreserveUnknownFields + DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty" patchStrategy:"replace" protobuf:"bytes,3,opt,name=dockerImageMetadata"` + // dockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" + DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty" protobuf:"bytes,4,opt,name=dockerImageMetadataVersion"` + // dockerImageManifest is the raw JSON of the manifest + DockerImageManifest string `json:"dockerImageManifest,omitempty" protobuf:"bytes,5,opt,name=dockerImageManifest"` + // dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. + DockerImageLayers []ImageLayer `json:"dockerImageLayers,omitempty" protobuf:"bytes,6,rep,name=dockerImageLayers"` + // signatures holds all signatures of the image. + // +patchMergeKey=name + // +patchStrategy=merge + Signatures []ImageSignature `json:"signatures,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=signatures"` + // dockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty" protobuf:"bytes,8,rep,name=dockerImageSignatures"` + // dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty" protobuf:"bytes,9,opt,name=dockerImageManifestMediaType"` + // dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + // Will not be set when the image represents a manifest list. + DockerImageConfig string `json:"dockerImageConfig,omitempty" protobuf:"bytes,10,opt,name=dockerImageConfig"` + // dockerImageManifests holds information about sub-manifests when the image represents a manifest list. + // When this field is present, no DockerImageLayers should be specified. + DockerImageManifests []ImageManifest `json:"dockerImageManifests,omitempty" protobuf:"bytes,11,rep,name=dockerImageManifests"` +} + +// ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular +// Image object. +type ImageManifest struct { + // digest is the unique identifier for the manifest. It refers to an Image object. + Digest string `json:"digest" protobuf:"bytes,1,opt,name=digest"` + // mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, + // application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json. + MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"` + // manifestSize represents the size of the raw object contents, in bytes. + ManifestSize int64 `json:"manifestSize" protobuf:"varint,3,opt,name=manifestSize"` + // architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`. + Architecture string `json:"architecture" protobuf:"bytes,4,opt,name=architecture"` + // os specifies the operating system, for example `linux`. 
+ OS string `json:"os" protobuf:"bytes,5,opt,name=os"` + // variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU + // variant of the ARM CPU. + Variant string `json:"variant,omitempty" protobuf:"bytes,6,opt,name=variant"` +} + +// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. +type ImageLayer struct { + // name of the layer as defined by the underlying store. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // size of the layer in bytes as defined by the underlying store. + LayerSize int64 `json:"size" protobuf:"varint,2,opt,name=size"` + // mediaType of the referenced object. + MediaType string `json:"mediaType" protobuf:"bytes,3,opt,name=mediaType"` +} + +// +genclient +// +genclient:onlyVerbs=create,delete +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageSignature holds a signature of an image. It allows one to verify image identity and possibly other claims +// as long as the signature is trusted. Based on this information it is possible to restrict runnable images +// to those matching cluster-wide policy. +// Mandatory fields should be parsed by clients doing image verification. The others are parsed from +// signature's content by the server. They serve an informative purpose only. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageSignature struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Required: Describes a type of stored blob. + Type string `json:"type" protobuf:"bytes,2,opt,name=type"` + // Required: An opaque binary string which is an image's signature. + Content []byte `json:"content" protobuf:"bytes,3,opt,name=content"` + // conditions represent the latest available observations of a signature's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` + + // The following metadata fields will be set by the server if the signature content is successfully parsed and + // the information is available. + + // A human readable string representing the image's identity. It could be a product name and version, or an + // image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2"). + ImageIdentity string `json:"imageIdentity,omitempty" protobuf:"bytes,5,opt,name=imageIdentity"` + // Contains claims from the signature. + SignedClaims map[string]string `json:"signedClaims,omitempty" protobuf:"bytes,6,rep,name=signedClaims"` + // If specified, it is the time of signature's creation. + Created *metav1.Time `json:"created,omitempty" protobuf:"bytes,7,opt,name=created"` + // If specified, it holds information about an issuer of signing certificate or key (a person or entity + // who signed the signing certificate or key). + IssuedBy *SignatureIssuer `json:"issuedBy,omitempty" protobuf:"bytes,8,opt,name=issuedBy"` + // If specified, it holds information about a subject of signing certificate or key (a person or entity + // who signed the image).
+ IssuedTo *SignatureSubject `json:"issuedTo,omitempty" protobuf:"bytes,9,opt,name=issuedTo"` +} + +// SignatureConditionType is a type of image signature condition. +type SignatureConditionType string + +// SignatureCondition describes an image signature condition of a particular kind at a particular probe time. +type SignatureCondition struct { + // type of signature condition, Complete or Failed. + Type SignatureConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SignatureConditionType"` + // status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition was checked. + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject +// of signing certificate or key. +type SignatureGenericEntity struct { + // organization name. + Organization string `json:"organization,omitempty" protobuf:"bytes,1,opt,name=organization"` + // Common name (e.g. openshift-signing-service). + CommonName string `json:"commonName,omitempty" protobuf:"bytes,2,opt,name=commonName"` +} + +// SignatureIssuer holds information about an issuer of signing certificate or key. +type SignatureIssuer struct { + SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"` +} + +// SignatureSubject holds information about a person or entity who created the signature. +type SignatureSubject struct { + SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"` + // If present, it is a human readable key id of public key belonging to the subject used to verify image + // signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g. + // 0x685ebe62bf278440). + PublicKeyID string `json:"publicKeyID" protobuf:"bytes,2,opt,name=publicKeyID"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamList is a list of ImageStream objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageStreamList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of imageStreams + Items []ImageStream `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:method=Secrets,verb=get,subresource=secrets,result=github.com/openshift/api/image/v1.SecretList +// +genclient:method=Layers,verb=get,subresource=layers,result=github.com/openshift/api/image/v1.ImageStreamLayers +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// An ImageStream stores a mapping of tags to images, metadata overrides that are applied +// when images are tagged in a stream, and an optional reference to a container image +// repository on a registry. Users typically update the spec.tags field to point to external +// images which are imported from container registries using credentials in your namespace +// with the pull secret type, or to existing image stream tags and images which are +// immediately accessible for tagging or pulling. The history of images applied to a tag +// is visible in the status.tags field and any user who can view an image stream is allowed +// to tag that image into their own image streams. Access to pull images from the integrated +// registry is granted by having the "get imagestreams/layers" permission on a given image +// stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both +// spec and status for that tag to be removed. Image stream history is retained until an +// administrator runs the prune operation, which removes references that are no longer in +// use. To preserve a historical image, ensure there is a tag in spec pointing to that image +// by its digest. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageStream struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec describes the desired state of this stream + // +optional + Spec ImageStreamSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // status describes the current state of this stream + // +optional + Status ImageStreamStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ImageStreamSpec represents options for ImageStreams. +type ImageStreamSpec struct { + // lookupPolicy controls how other resources reference images within this namespace. + LookupPolicy ImageLookupPolicy `json:"lookupPolicy,omitempty" protobuf:"bytes,3,opt,name=lookupPolicy"` + // dockerImageRepository is optional, if specified this stream is backed by a container repository on this server + // Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. + // Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead. 
+
+// ImageLookupPolicy describes how an image stream can be used to override the image references
+// used by pods, builds, and other resources in a namespace.
+type ImageLookupPolicy struct {
+	// local will change the docker short image references (like "mysql" or
+	// "php:latest") on objects in this namespace to the image ID whenever they match
+	// this image stream, instead of reaching out to a remote registry. The name will
+	// be fully qualified to an image ID if found. The tag's referencePolicy is taken
+	// into account on the replaced value. Only works within the current namespace.
+	Local bool `json:"local" protobuf:"varint,3,opt,name=local"`
+}
+
+// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.
+type TagReference struct {
+	// name of the tag
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.
+	// +optional
+	Annotations map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"`
+	// Optional; if specified, a reference to another image that this tag should point to. Valid values
+	// are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references
+	// can only reference a tag within this same ImageStream.
+	From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,3,opt,name=from"`
+	// reference states if the tag will be imported. The default value is false, which means the tag will
+	// be imported; when true, the tag only points at its source and is not imported.
+	Reference bool `json:"reference,omitempty" protobuf:"varint,4,opt,name=reference"`
+	// generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference
+	// is changed the generation is set to match the current stream generation (which is incremented every
+	// time spec is changed). Other processes in the system like the image importer observe that the
+	// generation of spec tag is newer than the generation recorded in the status and use that as a trigger
+	// to import the newest remote tag. To trigger a new import, clients may set this value to zero, which
+	// will reset the generation to the latest stream generation. Legacy clients will send this value as
+	// nil, which will be merged with the current tag generation.
+	// +optional
+	Generation *int64 `json:"generation" protobuf:"varint,5,opt,name=generation"`
+	// importPolicy is information that controls how images may be imported by the server.
+	ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,6,opt,name=importPolicy"`
+	// referencePolicy defines how other components should consume the image.
+	ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,7,opt,name=referencePolicy"`
+}
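// Aside (illustrative sketch, not part of the vendored file): per the generation doc above,
// clients may zero a spec tag's generation to request a fresh import; the server then resets
// it to the latest stream generation. The helper name is an assumption for this example.
package example

import imagev1 "github.com/openshift/api/image/v1"

// requestReimport zeroes the generation of the named spec tag, signalling the importer
// that the tag should be re-imported. It returns false if the tag is not defined in spec.
func requestReimport(stream *imagev1.ImageStream, tagName string) bool {
	for i := range stream.Spec.Tags {
		if stream.Spec.Tags[i].Name == tagName {
			zero := int64(0)
			stream.Spec.Tags[i].Generation = &zero
			return true
		}
	}
	return false
}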
+
+// TagImportPolicy controls how images related to this tag will be imported.
+type TagImportPolicy struct {
+	// insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.
+	Insecure bool `json:"insecure,omitempty" protobuf:"varint,1,opt,name=insecure"`
+	// scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported again when it is not.
+	Scheduled bool `json:"scheduled,omitempty" protobuf:"varint,2,opt,name=scheduled"`
+	// importMode describes how to import an image manifest.
+	ImportMode ImportModeType `json:"importMode,omitempty" protobuf:"bytes,3,opt,name=importMode,casttype=ImportModeType"`
+}
+
+// ImportModeType describes how to import an image manifest.
+type ImportModeType string
+
+const (
+	// ImportModeLegacy indicates that the legacy behaviour should be used.
+	// For manifest lists, the legacy behaviour will discard the manifest list and import a single
+	// sub-manifest. In this case, the platform is chosen in the following order of priority:
+	// 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list.
+	// This mode is the default.
+	ImportModeLegacy ImportModeType = "Legacy"
+	// ImportModePreserveOriginal indicates that the original manifest will be preserved.
+	// For manifest lists, the manifest list and all its sub-manifests will be imported.
+	ImportModePreserveOriginal ImportModeType = "PreserveOriginal"
+)
+
+// TagReferencePolicyType describes how pull-specs for images in an image stream tag are generated when
+// image change triggers are fired.
+type TagReferencePolicyType string
+
+const (
+	// SourceTagReferencePolicy indicates the image's original location should be used when the image stream tag
+	// is resolved into other resources (builds and deployment configurations).
+	SourceTagReferencePolicy TagReferencePolicyType = "Source"
+	// LocalTagReferencePolicy indicates the image should prefer to pull via the local integrated registry,
+	// falling back to the remote location if the integrated registry has not been configured. The reference will
+	// use the internal DNS name or registry service IP.
+	LocalTagReferencePolicy TagReferencePolicyType = "Local"
+)
+
+// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when
+// image change triggers in deployment configs or builds are resolved. This allows the image stream
+// author to control how images are accessed.
+type TagReferencePolicy struct {
+	// type determines how the image pull spec should be transformed when the image stream tag is used in
+	// deployment config triggers or new builds. The default value is `Source`, indicating the original
+	// location of the image should be used (if imported). The user may also specify `Local`, indicating
+	// that the pull spec should point to the integrated container image registry and leverage the registry's
+	// ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this
+	// image to be managed from the image stream's namespace, so others on the platform can access a remote
+	// image but have no access to the remote secret. It also allows the image layers to be mirrored into
+	// the local registry, so that the images can still be pulled even if the upstream registry is unavailable.
+	Type TagReferencePolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagReferencePolicyType"`
+}
+
+// ImageStreamStatus contains information about the state of this image stream.
+type ImageStreamStatus struct {
+	// dockerImageRepository represents the effective location this stream may be accessed at.
+	// May be empty until the server determines where the repository is located.
+	// +optional
+	DockerImageRepository string `json:"dockerImageRepository" protobuf:"bytes,1,opt,name=dockerImageRepository"`
+	// publicDockerImageRepository represents the public location from where the image can
+	// be pulled outside the cluster. This field may be empty if the administrator
+	// has not exposed the integrated registry externally.
+	// +optional
+	PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty" protobuf:"bytes,3,opt,name=publicDockerImageRepository"`
+	// tags are a historical record of images associated with each tag. The first entry in the
+	// TagEvent array is the currently tagged image.
+	// +patchMergeKey=tag
+	// +patchStrategy=merge
+	// +optional
+	Tags []NamedTagEventList `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"tag" protobuf:"bytes,2,rep,name=tags"`
+}
+
+// NamedTagEventList relates a tag to its image history.
+type NamedTagEventList struct {
+	// tag is the tag for which the history is recorded
+	Tag string `json:"tag" protobuf:"bytes,1,opt,name=tag"`
+	// items is the historical record of images for this tag; the first entry is the
+	// most recently tagged image.
+	Items []TagEvent `json:"items" protobuf:"bytes,2,rep,name=items"`
+	// conditions is an array of conditions that apply to the tag event list.
+	Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"`
+}
+
+// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.
+type TagEvent struct {
+	// created holds the time the TagEvent was created
+	Created metav1.Time `json:"created" protobuf:"bytes,1,opt,name=created"`
+	// dockerImageReference is the string that can be used to pull this image
+	DockerImageReference string `json:"dockerImageReference" protobuf:"bytes,2,opt,name=dockerImageReference"`
+	// image is the name of the image tagged by this event
+	Image string `json:"image" protobuf:"bytes,3,opt,name=image"`
+	// generation is the spec tag generation that resulted in this tag being updated
+	Generation int64 `json:"generation" protobuf:"varint,4,opt,name=generation"`
+}
+
+type TagEventConditionType string
+
+// These are valid conditions of TagEvents.
+const (
+	// ImportSuccess with status False means the import of the specific tag failed
+	ImportSuccess TagEventConditionType = "ImportSuccess"
+)
+
+// TagEventCondition contains condition information for a tag event.
+type TagEventCondition struct {
+	// type of tag event condition, currently only ImportSuccess
+	Type TagEventConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagEventConditionType"`
+	// status of the condition, one of True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// lastTransitionTime is the time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// reason is a brief machine readable explanation for the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// message is a human readable description of the details about the last transition, complementing reason.
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+	// generation is the spec tag generation that this status corresponds to
+	Generation int64 `json:"generation" protobuf:"varint,6,opt,name=generation"`
+}
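// Aside (illustrative sketch, not part of the vendored file): as the comments above state,
// the first entry of a tag's items is the currently tagged image, so the current pull spec
// for a tag can be read from status like this. The helper name is an assumption.
package example

import imagev1 "github.com/openshift/api/image/v1"

// currentImageFor returns the pull spec of the image currently tagged as tagName,
// or "" if the tag has no recorded history.
func currentImageFor(status imagev1.ImageStreamStatus, tagName string) string {
	for _, history := range status.Tags {
		if history.Tag == tagName && len(history.Items) > 0 {
			return history.Items[0].DockerImageReference
		}
	}
	return ""
}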
+
+// +genclient
+// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch
+// +genclient:method=Create,verb=create,result=k8s.io/apimachinery/pkg/apis/meta/v1.Status
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamMapping represents a mapping from a single image stream tag to a container
+// image as well as the reference to the container image stream the image came from. This
+// resource is used by privileged integrators to create an image resource and to associate
+// it with an image stream in the status tags field. Creating an ImageStreamMapping will
+// allow any user who can view the image stream to tag or pull that image, so only create
+// mappings where the user has proven they have access to the image contents directly.
+// The only operation supported for this resource is create, and the metadata name and
+// namespace should be set to the image stream containing the tag that should be updated.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamMapping struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// image is a container image.
+	Image Image `json:"image" protobuf:"bytes,2,opt,name=image"`
+	// tag is a string value this image can be located with inside the stream.
+	Tag string `json:"tag" protobuf:"bytes,3,opt,name=tag"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=get,list,create,update,delete
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream.
+// Use this resource to interact with the tags and images in an image stream by tag, or
+// to see the image details for a particular tag. The image associated with this resource
+// is the most recently successfully tagged, imported, or pushed image (as described in the
+// image stream status.tags.items list for this tag). If an import is in progress or has
+// failed, the previous image will be shown. Deleting an image stream tag clears both the
+// status and spec fields of an image stream. If no image can be retrieved for a given tag,
+// a not found error will be returned.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamTag struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// tag is the spec tag associated with this image stream tag, and it may be null
+	// if only pushes have occurred to this image stream.
+	Tag *TagReference `json:"tag" protobuf:"bytes,2,opt,name=tag"`
+
+	// generation is the current generation of the tagged image; if tag is provided
+	// and this value is not equal to the tag generation, a user has requested an
+	// import that has not completed, or conditions will be filled out indicating any
+	// error.
+	Generation int64 `json:"generation" protobuf:"varint,3,opt,name=generation"`
+
+	// lookupPolicy indicates whether this tag will handle image references in this
+	// namespace.
+	LookupPolicy ImageLookupPolicy `json:"lookupPolicy" protobuf:"varint,6,opt,name=lookupPolicy"`
+
+	// conditions is an array of conditions that apply to the image stream tag.
+	Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,4,rep,name=conditions"`
+
+	// image associated with the ImageStream and tag.
+	Image Image `json:"image" protobuf:"bytes,5,opt,name=image"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamTagList is a list of ImageStreamTag objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamTagList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is the list of image stream tags
+	Items []ImageStreamTag `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=get,list,create,update,delete
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTag represents a single tag within an image stream and includes the spec,
+// the status history, and the currently referenced image (if any) of the provided
+// tag. This type replaces the ImageStreamTag by providing a full view of the tag.
+// ImageTags are returned for every spec or status tag present on the image stream.
+// If no tag exists in either form, a not found error will be returned by the API.
+// A create operation will succeed if no spec tag has already been defined and the
+// spec field is set. Delete will remove both spec and status elements from the
+// image stream.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageTag struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec is the spec tag associated with this image stream tag, and it may be null
+	// if only pushes have occurred to this image stream.
+	Spec *TagReference `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// status is the status tag details associated with this image stream tag, and it
+	// may be null if no push or import has been performed.
+	Status *NamedTagEventList `json:"status" protobuf:"bytes,3,opt,name=status"`
+	// image is the details of the most recent image stream status tag, and it may be
+	// null if import has not completed or an administrator has deleted the image
+	// object. To verify this is the most recent image, you must verify the generation
+	// of the most recent status.items entry matches the spec tag (if a spec tag is
+	// set). This field will not be set when listing image tags.
+	Image *Image `json:"image" protobuf:"bytes,4,opt,name=image"`
+}
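// Aside (illustrative sketch, not part of the vendored file): the ImageTag doc above says
// spec, status, and image may each be null; a client could classify a tag's state by
// checking which halves are populated. The wording of each case is an interpretation.
package example

import imagev1 "github.com/openshift/api/image/v1"

// describeTag summarizes which parts of an ImageTag are populated.
func describeTag(t *imagev1.ImageTag) string {
	switch {
	case t.Spec == nil && t.Status != nil:
		return "push-only tag: history exists but no spec tag is defined"
	case t.Spec != nil && t.Status == nil:
		return "no history yet: spec tag defined but nothing imported or pushed"
	case t.Image == nil:
		return "image unavailable: listed tag, incomplete import, or deleted image"
	default:
		return "tag fully resolved"
	}
}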
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTagList is a list of ImageTag objects. When listing image tags, the image
+// field is not populated. Tags are returned in alphabetical order by image stream
+// and then tag.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageTagList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is the list of image stream tags
+	Items []ImageTag `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=get
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream.
+// User interfaces and regular users can use this resource to access the metadata details of
+// a tagged image in the image stream history for viewing, since Image resources are not
+// directly accessible to end users. A not found error will be returned if no such image is
+// referenced by a tag within the ImageStream. Images are created when spec tags are set on
+// an image stream that represent an image in an external registry, when pushing to the
+// integrated registry, or when tagging an existing image from one image stream to another.
+// The name of an image stream image is in the form "<stream name>@<digest>", where the digest
+// is the content addressable identifier for the image (sha256:xxxxx...). You can use
+// ImageStreamImages as the from.kind of an image stream spec tag to reference an image
+// exactly. The only operation supported on the imagestreamimage endpoint is retrieving
+// the image.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamImage struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// image associated with the ImageStream and image name.
+	Image Image `json:"image" protobuf:"bytes,2,opt,name=image"`
+}
+
+// DockerImageReference points to a container image.
+type DockerImageReference struct {
+	// Registry is the registry that contains the container image
+	Registry string `protobuf:"bytes,1,opt,name=registry"`
+	// Namespace is the namespace that contains the container image
+	Namespace string `protobuf:"bytes,2,opt,name=namespace"`
+	// Name is the name of the container image
+	Name string `protobuf:"bytes,3,opt,name=name"`
+	// Tag is which tag of the container image is being referenced
+	Tag string `protobuf:"bytes,4,opt,name=tag"`
+	// ID is the identifier for the container image
+	ID string `protobuf:"bytes,5,opt,name=iD"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamLayers describes information about the layers referenced by images in this
+// image stream.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamLayers struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// blobs is a map of blob name to metadata about the blob.
+	Blobs map[string]ImageLayerData `json:"blobs" protobuf:"bytes,2,rep,name=blobs"`
+	// images is a map between an image name and the names of the blobs and config that
+	// comprise the image.
+	Images map[string]ImageBlobReferences `json:"images" protobuf:"bytes,3,rep,name=images"`
+}
+
+// ImageBlobReferences describes the blob references within an image.
+type ImageBlobReferences struct {
+	// imageMissing is true if the image is referenced by the image stream but the image
+	// object has been deleted from the API by an administrator. When this field is set,
+	// layers and config fields may be empty and callers that depend on the image metadata
+	// should consider the image to be unavailable for download or viewing.
+	// +optional
+	ImageMissing bool `json:"imageMissing" protobuf:"varint,3,opt,name=imageMissing"`
+	// layers is the list of blobs that compose this image, from base layer to top layer.
+	// All layers referenced by this array will be defined in the blobs map. Some images
+	// may have zero layers.
+	// +optional
+	Layers []string `json:"layers" protobuf:"bytes,1,rep,name=layers"`
+	// config, if set, is the blob that contains the image config. Some images do
+	// not have separate config blobs and this field will be set to nil if so.
+	// +optional
+	Config *string `json:"config" protobuf:"bytes,2,opt,name=config"`
+	// manifests is the list of other image names that this image points
+	// to. For a single architecture image, it is empty. For a multi-arch
+	// image, it consists of the digests of single architecture images;
+	// such images should have neither layers nor config.
+	// +optional
+	Manifests []string `json:"manifests,omitempty" protobuf:"bytes,4,rep,name=manifests"`
+}
+
+// ImageLayerData contains metadata about an image layer.
+type ImageLayerData struct {
+	// size of the layer in bytes as defined by the underlying store. This field may be
+	// unset if the necessary information about size is not available.
+	LayerSize *int64 `json:"size" protobuf:"varint,1,opt,name=size"`
+	// mediaType of the referenced object.
+	MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"`
+}
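// Aside (illustrative sketch, not part of the vendored file): combining the images and
// blobs maps described above to approximate an image's total layer size. Layers whose
// size metadata is unset are skipped; the helper name is an assumption.
package example

import imagev1 "github.com/openshift/api/image/v1"

// approximateImageSize sums the known sizes of the layers that compose imageName.
func approximateImageSize(layers *imagev1.ImageStreamLayers, imageName string) int64 {
	refs, ok := layers.Images[imageName]
	if !ok || refs.ImageMissing {
		return 0
	}
	var total int64
	for _, blob := range refs.Layers {
		if data, ok := layers.Blobs[blob]; ok && data.LayerSize != nil {
			total += *data.LayerSize
		}
	}
	return total
}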
+
+// +genclient
+// +genclient:onlyVerbs=create
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// The image stream import resource provides an easy way for a user to find and import container images
+// from other container image registries into the server. Individual images or an entire image repository may
+// be imported, and users may choose to see the results of the import prior to tagging the resulting
+// images into the specified image stream.
+//
+// This API is intended for end-user tools that need to see the metadata of the image prior to import
+// (for instance, to generate an application from it). Clients that know the desired image can continue
+// to create spec.tags directly into their image streams.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamImport struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec is a description of the images that the user wishes to import
+	Spec ImageStreamImportSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// status is the result of importing the image
+	Status ImageStreamImportStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ImageStreamImportSpec defines what images should be imported.
+type ImageStreamImportSpec struct {
+	// import indicates whether to perform an import; if so, the specified tags are set on the spec
+	// and status of the image stream defined by the type meta.
+	Import bool `json:"import" protobuf:"varint,1,opt,name=import"`
+	// repository is an optional import of an entire container image repository. A maximum limit on the
+	// number of tags imported this way is imposed by the server.
+	Repository *RepositoryImportSpec `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"`
+	// images are a list of individual images to import.
+	Images []ImageImportSpec `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"`
+}
+
+// ImageStreamImportStatus contains information about the status of an image stream import.
+type ImageStreamImportStatus struct {
+	// import is the image stream that was successfully updated or created when 'to' was set.
+	// +optional
+	Import *ImageStream `json:"import,omitempty" protobuf:"bytes,1,opt,name=import"`
+	// repository is set to the outcome of importing spec.repository, if it was set
+	// +optional
+	Repository *RepositoryImportStatus `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"`
+	// images is set with the result of importing spec.images
+	// +optional
+	Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"`
+}
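// Aside (illustrative sketch, not part of the vendored file): building an ImageStreamImport
// that only previews an image's metadata, matching the "see the metadata of the image prior
// to import" use case above. Per the import field doc, leaving Import false means results
// are returned in status without updating the stream; the names here are assumptions.
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	imagev1 "github.com/openshift/api/image/v1"
)

// newPreviewImport asks the server to resolve ref without persisting any tags.
func newPreviewImport(namespace, streamName, ref string) *imagev1.ImageStreamImport {
	return &imagev1.ImageStreamImport{
		ObjectMeta: metav1.ObjectMeta{Name: streamName, Namespace: namespace},
		Spec: imagev1.ImageStreamImportSpec{
			Import: false, // preview only: results appear in status, stream is unchanged
			Images: []imagev1.ImageImportSpec{{
				From:            corev1.ObjectReference{Kind: "DockerImage", Name: ref},
				IncludeManifest: true,
			}},
		},
	}
}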
+
+// RepositoryImportSpec describes a request to import images from a container image repository.
+type RepositoryImportSpec struct {
+	// from is the source for the image repository to import; only kind DockerImage with the name of a container image repository is allowed
+	From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+	// importPolicy is the policy controlling how the image is imported
+	ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,2,opt,name=importPolicy"`
+	// referencePolicy defines how other components should consume the image
+	ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,4,opt,name=referencePolicy"`
+	// includeManifest determines if the manifest for each image is returned in the response
+	IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,3,opt,name=includeManifest"`
+}
+
+// RepositoryImportStatus describes the result of an image repository import.
+type RepositoryImportStatus struct {
+	// status reflects whether any failure occurred during import
+	Status metav1.Status `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"`
+	// images is a list of images successfully retrieved by the import of the repository.
+	Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,2,rep,name=images"`
+	// additionalTags are tags that exist in the repository but were not imported because
+	// a maximum limit of automatic imports was applied.
+	AdditionalTags []string `json:"additionalTags,omitempty" protobuf:"bytes,3,rep,name=additionalTags"`
+}
+
+// ImageImportSpec describes a request to import a specific image.
+type ImageImportSpec struct {
+	// from is the source of an image to import; only kind DockerImage is allowed
+	From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+	// to is a tag in the current image stream to assign the imported image to; if name is not specified, the default tag from from.name will be used
+	To *corev1.LocalObjectReference `json:"to,omitempty" protobuf:"bytes,2,opt,name=to"`
+
+	// importPolicy is the policy controlling how the image is imported
+	ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,3,opt,name=importPolicy"`
+	// referencePolicy defines how other components should consume the image
+	ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,5,opt,name=referencePolicy"`
+	// includeManifest determines if the manifest for each image is returned in the response
+	IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,4,opt,name=includeManifest"`
+}
+
+// ImageImportStatus describes the result of an image import.
+type ImageImportStatus struct {
+	// status is the status of the image import, including errors encountered while retrieving the image
+	Status metav1.Status `json:"status" protobuf:"bytes,1,opt,name=status"`
+	// image is the metadata of that image, if the image was located
+	Image *Image `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
+	// tag is the tag this image was located under, if any
+	Tag string `json:"tag,omitempty" protobuf:"bytes,3,opt,name=tag"`
+	// manifests holds sub-manifests metadata when importing a manifest list
+	Manifests []Image `json:"manifests,omitempty" protobuf:"bytes,4,rep,name=manifests"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretList is a list of Secret.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1 +type SecretList corev1.SecretList diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..af6eee05c --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go @@ -0,0 +1,1045 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by codegen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImageReference) DeepCopyInto(out *DockerImageReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImageReference. +func (in *DockerImageReference) DeepCopy() *DockerImageReference { + if in == nil { + return nil + } + out := new(DockerImageReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.DockerImageMetadata.DeepCopyInto(&out.DockerImageMetadata) + if in.DockerImageLayers != nil { + in, out := &in.DockerImageLayers, &out.DockerImageLayers + *out = make([]ImageLayer, len(*in)) + copy(*out, *in) + } + if in.Signatures != nil { + in, out := &in.Signatures, &out.Signatures + *out = make([]ImageSignature, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerImageSignatures != nil { + in, out := &in.DockerImageSignatures, &out.DockerImageSignatures + *out = make([][]byte, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]byte, len(*in)) + copy(*out, *in) + } + } + } + if in.DockerImageManifests != nil { + in, out := &in.DockerImageManifests, &out.DockerImageManifests + *out = make([]ImageManifest, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBlobReferences) DeepCopyInto(out *ImageBlobReferences) { + *out = *in + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(string) + **out = **in + } + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBlobReferences. 
+func (in *ImageBlobReferences) DeepCopy() *ImageBlobReferences { + if in == nil { + return nil + } + out := new(ImageBlobReferences) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageImportSpec) DeepCopyInto(out *ImageImportSpec) { + *out = *in + out.From = in.From + if in.To != nil { + in, out := &in.To, &out.To + *out = new(corev1.LocalObjectReference) + **out = **in + } + out.ImportPolicy = in.ImportPolicy + out.ReferencePolicy = in.ReferencePolicy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportSpec. +func (in *ImageImportSpec) DeepCopy() *ImageImportSpec { + if in == nil { + return nil + } + out := new(ImageImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageImportStatus) DeepCopyInto(out *ImageImportStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportStatus. +func (in *ImageImportStatus) DeepCopy() *ImageImportStatus { + if in == nil { + return nil + } + out := new(ImageImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLayer) DeepCopyInto(out *ImageLayer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayer. +func (in *ImageLayer) DeepCopy() *ImageLayer { + if in == nil { + return nil + } + out := new(ImageLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLayerData) DeepCopyInto(out *ImageLayerData) { + *out = *in + if in.LayerSize != nil { + in, out := &in.LayerSize, &out.LayerSize + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayerData. +func (in *ImageLayerData) DeepCopy() *ImageLayerData { + if in == nil { + return nil + } + out := new(ImageLayerData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLookupPolicy) DeepCopyInto(out *ImageLookupPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLookupPolicy. +func (in *ImageLookupPolicy) DeepCopy() *ImageLookupPolicy { + if in == nil { + return nil + } + out := new(ImageLookupPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageManifest) DeepCopyInto(out *ImageManifest) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageManifest. +func (in *ImageManifest) DeepCopy() *ImageManifest { + if in == nil { + return nil + } + out := new(ImageManifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSignature) DeepCopyInto(out *ImageSignature) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]SignatureCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SignedClaims != nil { + in, out := &in.SignedClaims, &out.SignedClaims + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Created != nil { + in, out := &in.Created, &out.Created + *out = (*in).DeepCopy() + } + if in.IssuedBy != nil { + in, out := &in.IssuedBy, &out.IssuedBy + *out = new(SignatureIssuer) + **out = **in + } + if in.IssuedTo != nil { + in, out := &in.IssuedTo, &out.IssuedTo + *out = new(SignatureSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSignature. +func (in *ImageSignature) DeepCopy() *ImageSignature { + if in == nil { + return nil + } + out := new(ImageSignature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageSignature) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStream) DeepCopyInto(out *ImageStream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStream. +func (in *ImageStream) DeepCopy() *ImageStream { + if in == nil { + return nil + } + out := new(ImageStream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ImageStream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImage) DeepCopyInto(out *ImageStreamImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImage. +func (in *ImageStreamImage) DeepCopy() *ImageStreamImage { + if in == nil { + return nil + } + out := new(ImageStreamImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImport) DeepCopyInto(out *ImageStreamImport) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImport. +func (in *ImageStreamImport) DeepCopy() *ImageStreamImport { + if in == nil { + return nil + } + out := new(ImageStreamImport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamImport) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImportSpec) DeepCopyInto(out *ImageStreamImportSpec) { + *out = *in + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(RepositoryImportSpec) + **out = **in + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportSpec. +func (in *ImageStreamImportSpec) DeepCopy() *ImageStreamImportSpec { + if in == nil { + return nil + } + out := new(ImageStreamImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImportStatus) DeepCopyInto(out *ImageStreamImportStatus) { + *out = *in + if in.Import != nil { + in, out := &in.Import, &out.Import + *out = new(ImageStream) + (*in).DeepCopyInto(*out) + } + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(RepositoryImportStatus) + (*in).DeepCopyInto(*out) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportStatus. 
+func (in *ImageStreamImportStatus) DeepCopy() *ImageStreamImportStatus { + if in == nil { + return nil + } + out := new(ImageStreamImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamLayers) DeepCopyInto(out *ImageStreamLayers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Blobs != nil { + in, out := &in.Blobs, &out.Blobs + *out = make(map[string]ImageLayerData, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make(map[string]ImageBlobReferences, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamLayers. +func (in *ImageStreamLayers) DeepCopy() *ImageStreamLayers { + if in == nil { + return nil + } + out := new(ImageStreamLayers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamLayers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamList) DeepCopyInto(out *ImageStreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageStream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamList. +func (in *ImageStreamList) DeepCopy() *ImageStreamList { + if in == nil { + return nil + } + out := new(ImageStreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamMapping) DeepCopyInto(out *ImageStreamMapping) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamMapping. +func (in *ImageStreamMapping) DeepCopy() *ImageStreamMapping { + if in == nil { + return nil + } + out := new(ImageStreamMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamMapping) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageStreamSpec) DeepCopyInto(out *ImageStreamSpec) { + *out = *in + out.LookupPolicy = in.LookupPolicy + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamSpec. +func (in *ImageStreamSpec) DeepCopy() *ImageStreamSpec { + if in == nil { + return nil + } + out := new(ImageStreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamStatus) DeepCopyInto(out *ImageStreamStatus) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]NamedTagEventList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamStatus. +func (in *ImageStreamStatus) DeepCopy() *ImageStreamStatus { + if in == nil { + return nil + } + out := new(ImageStreamStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamTag) DeepCopyInto(out *ImageStreamTag) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(TagReference) + (*in).DeepCopyInto(*out) + } + out.LookupPolicy = in.LookupPolicy + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TagEventCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTag. +func (in *ImageStreamTag) DeepCopy() *ImageStreamTag { + if in == nil { + return nil + } + out := new(ImageStreamTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamTag) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamTagList) DeepCopyInto(out *ImageStreamTagList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageStreamTag, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTagList. +func (in *ImageStreamTagList) DeepCopy() *ImageStreamTagList { + if in == nil { + return nil + } + out := new(ImageStreamTagList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamTagList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageTag) DeepCopyInto(out *ImageTag) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(TagReference) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(NamedTagEventList) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTag. +func (in *ImageTag) DeepCopy() *ImageTag { + if in == nil { + return nil + } + out := new(ImageTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTag) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagList) DeepCopyInto(out *ImageTagList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageTag, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagList. +func (in *ImageTagList) DeepCopy() *ImageTagList { + if in == nil { + return nil + } + out := new(ImageTagList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTagList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedTagEventList) DeepCopyInto(out *NamedTagEventList) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TagEvent, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TagEventCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedTagEventList. +func (in *NamedTagEventList) DeepCopy() *NamedTagEventList { + if in == nil { + return nil + } + out := new(NamedTagEventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryImportSpec) DeepCopyInto(out *RepositoryImportSpec) { + *out = *in + out.From = in.From + out.ImportPolicy = in.ImportPolicy + out.ReferencePolicy = in.ReferencePolicy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportSpec. +func (in *RepositoryImportSpec) DeepCopy() *RepositoryImportSpec { + if in == nil { + return nil + } + out := new(RepositoryImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryImportStatus) DeepCopyInto(out *RepositoryImportStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportStatus. +func (in *RepositoryImportStatus) DeepCopy() *RepositoryImportStatus { + if in == nil { + return nil + } + out := new(RepositoryImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretList) DeepCopyInto(out *SecretList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]corev1.Secret, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. +func (in *SecretList) DeepCopy() *SecretList { + if in == nil { + return nil + } + out := new(SecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureCondition) DeepCopyInto(out *SignatureCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureCondition. +func (in *SignatureCondition) DeepCopy() *SignatureCondition { + if in == nil { + return nil + } + out := new(SignatureCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureGenericEntity) DeepCopyInto(out *SignatureGenericEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureGenericEntity. +func (in *SignatureGenericEntity) DeepCopy() *SignatureGenericEntity { + if in == nil { + return nil + } + out := new(SignatureGenericEntity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureIssuer) DeepCopyInto(out *SignatureIssuer) { + *out = *in + out.SignatureGenericEntity = in.SignatureGenericEntity + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureIssuer. +func (in *SignatureIssuer) DeepCopy() *SignatureIssuer { + if in == nil { + return nil + } + out := new(SignatureIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SignatureSubject) DeepCopyInto(out *SignatureSubject) {
+	*out = *in
+	out.SignatureGenericEntity = in.SignatureGenericEntity
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureSubject.
+func (in *SignatureSubject) DeepCopy() *SignatureSubject {
+	if in == nil {
+		return nil
+	}
+	out := new(SignatureSubject)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagEvent) DeepCopyInto(out *TagEvent) {
+	*out = *in
+	in.Created.DeepCopyInto(&out.Created)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEvent.
+func (in *TagEvent) DeepCopy() *TagEvent {
+	if in == nil {
+		return nil
+	}
+	out := new(TagEvent)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagEventCondition) DeepCopyInto(out *TagEventCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEventCondition.
+func (in *TagEventCondition) DeepCopy() *TagEventCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(TagEventCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagImportPolicy) DeepCopyInto(out *TagImportPolicy) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImportPolicy.
+func (in *TagImportPolicy) DeepCopy() *TagImportPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(TagImportPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagReference) DeepCopyInto(out *TagReference) {
+	*out = *in
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.From != nil {
+		in, out := &in.From, &out.From
+		*out = new(corev1.ObjectReference)
+		**out = **in
+	}
+	if in.Generation != nil {
+		in, out := &in.Generation, &out.Generation
+		*out = new(int64)
+		**out = **in
+	}
+	out.ImportPolicy = in.ImportPolicy
+	out.ReferencePolicy = in.ReferencePolicy
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReference.
+func (in *TagReference) DeepCopy() *TagReference {
+	if in == nil {
+		return nil
+	}
+	out := new(TagReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagReferencePolicy) DeepCopyInto(out *TagReferencePolicy) {
+	*out = *in
+	return
+}
+
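These generated helpers are what let callers clone API objects without aliasing the maps and pointers inside them; TagReference, for instance, carries an Annotations map and pointer fields that a plain struct assignment would share. A minimal usage sketch, assuming the vendored import path used throughout this diff (the field values are illustrative):

	package main

	import (
		"fmt"

		imagev1 "github.com/openshift/api/image/v1"
	)

	func main() {
		orig := &imagev1.TagReference{
			Name:        "latest",
			Annotations: map[string]string{"team": "platform"},
		}

		// DeepCopy allocates a fresh TagReference and recursively copies the
		// reference fields, so the clone shares no memory with orig.
		clone := orig.DeepCopy()
		clone.Annotations["team"] = "infra"

		fmt.Println(orig.Annotations["team"])  // platform
		fmt.Println(clone.Annotations["team"]) // infra
	}

+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReferencePolicy.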
+func (in *TagReferencePolicy) DeepCopy() *TagReferencePolicy { + if in == nil { + return nil + } + out := new(TagReferencePolicy) + in.DeepCopyInto(out) + return out +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..9671768f6 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,444 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_DockerImageReference = map[string]string{ + "": "DockerImageReference points to a container image.", + "Registry": "Registry is the registry that contains the container image", + "Namespace": "Namespace is the namespace that contains the container image", + "Name": "Name is the name of the container image", + "Tag": "Tag is which tag of the container image is being referenced", + "ID": "ID is the identifier for the container image", +} + +func (DockerImageReference) SwaggerDoc() map[string]string { + return map_DockerImageReference +} + +var map_Image = map[string]string{ + "": "Image is an immutable representation of a container image and its metadata at a point in time. Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users, instead, access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "dockerImageReference": "dockerImageReference is the string that can be used to pull this image.", + "dockerImageMetadata": "dockerImageMetadata contains metadata about this image", + "dockerImageMetadataVersion": "dockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", + "dockerImageManifest": "dockerImageManifest is the raw JSON of the manifest", + "dockerImageLayers": "dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.", + "signatures": "signatures holds all signatures of the image.", + "dockerImageSignatures": "dockerImageSignatures provides the signatures as opaque blobs. 
This is a part of manifest schema v1.", + "dockerImageManifestMediaType": "dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.", + "dockerImageConfig": "dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. Will not be set when the image represents a manifest list.", + "dockerImageManifests": "dockerImageManifests holds information about sub-manifests when the image represents a manifest list. When this field is present, no DockerImageLayers should be specified.", +} + +func (Image) SwaggerDoc() map[string]string { + return map_Image +} + +var map_ImageBlobReferences = map[string]string{ + "": "ImageBlobReferences describes the blob references within an image.", + "imageMissing": "imageMissing is true if the image is referenced by the image stream but the image object has been deleted from the API by an administrator. When this field is set, layers and config fields may be empty and callers that depend on the image metadata should consider the image to be unavailable for download or viewing.", + "layers": "layers is the list of blobs that compose this image, from base layer to top layer. All layers referenced by this array will be defined in the blobs map. Some images may have zero layers.", + "config": "config, if set, is the blob that contains the image config. Some images do not have separate config blobs and this field will be set to nil if so.", + "manifests": "manifests is the list of other image names that this image points to. For a single architecture image, it is empty. For a multi-arch image, it consists of the digests of single architecture images, such images shouldn't have layers nor config.", +} + +func (ImageBlobReferences) SwaggerDoc() map[string]string { + return map_ImageBlobReferences +} + +var map_ImageImportSpec = map[string]string{ + "": "ImageImportSpec describes a request to import a specific image.", + "from": "from is the source of an image to import; only kind DockerImage is allowed", + "to": "to is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used", + "importPolicy": "importPolicy is the policy controlling how the image is imported", + "referencePolicy": "referencePolicy defines how other components should consume the image", + "includeManifest": "includeManifest determines if the manifest for each image is returned in the response", +} + +func (ImageImportSpec) SwaggerDoc() map[string]string { + return map_ImageImportSpec +} + +var map_ImageImportStatus = map[string]string{ + "": "ImageImportStatus describes the result of an image import.", + "status": "status is the status of the image import, including errors encountered while retrieving the image", + "image": "image is the metadata of that image, if the image was located", + "tag": "tag is the tag this image was located under, if any", + "manifests": "manifests holds sub-manifests metadata when importing a manifest list", +} + +func (ImageImportStatus) SwaggerDoc() map[string]string { + return map_ImageImportStatus +} + +var map_ImageLayer = map[string]string{ + "": "ImageLayer represents a single layer of the image. Some images may have multiple layers. 
Some may have none.",
+	"name":      "name of the layer as defined by the underlying store.",
+	"size":      "size of the layer in bytes as defined by the underlying store.",
+	"mediaType": "mediaType of the referenced object.",
+}
+
+func (ImageLayer) SwaggerDoc() map[string]string {
+	return map_ImageLayer
+}
+
+var map_ImageLayerData = map[string]string{
+	"":          "ImageLayerData contains metadata about an image layer.",
+	"size":      "size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.",
+	"mediaType": "mediaType of the referenced object.",
+}
+
+func (ImageLayerData) SwaggerDoc() map[string]string {
+	return map_ImageLayerData
+}
+
+var map_ImageList = map[string]string{
+	"":         "ImageList is a list of Image objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items":    "items is a list of images",
+}
+
+func (ImageList) SwaggerDoc() map[string]string {
+	return map_ImageList
+}
+
+var map_ImageLookupPolicy = map[string]string{
+	"":      "ImageLookupPolicy describes how an image stream can be used to override the image references used by pods, builds, and other resources in a namespace.",
+	"local": "local will change the docker short image references (like \"mysql\" or \"php:latest\") on objects in this namespace to the image ID whenever they match this image stream, instead of reaching out to a remote registry. The name will be fully qualified to an image ID if found. The tag's referencePolicy is taken into account on the replaced value. Only works within the current namespace.",
+}
+
+func (ImageLookupPolicy) SwaggerDoc() map[string]string {
+	return map_ImageLookupPolicy
+}
+
+var map_ImageManifest = map[string]string{
+	"":             "ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular Image object.",
+	"digest":       "digest is the unique identifier for the manifest. It refers to an Image object.",
+	"mediaType":    "mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.",
+	"manifestSize": "manifestSize represents the size of the raw object contents, in bytes.",
+	"architecture": "architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.",
+	"os":           "os specifies the operating system, for example `linux`.",
+	"variant":      "variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU variant of the ARM CPU.",
+}
+
+func (ImageManifest) SwaggerDoc() map[string]string {
+	return map_ImageManifest
+}
+
+var map_ImageSignature = map[string]string{
+	"": "ImageSignature holds a signature of an image. It allows verifying image identity and possibly other claims as long as the signature is trusted. Based on this information it is possible to restrict runnable images to those matching cluster-wide policy. Mandatory fields should be parsed by clients doing image verification. The others are parsed from signature's content by the server. 
They serve just an informative purpose.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "type": "Required: Describes a type of stored blob.", + "content": "Required: An opaque binary string which is an image's signature.", + "conditions": "conditions represent the latest available observations of a signature's current state.", + "imageIdentity": "A human readable string representing image's identity. It could be a product name and version, or an image pull spec (e.g. \"registry.access.redhat.com/rhel7/rhel:7.2\").", + "signedClaims": "Contains claims from the signature.", + "created": "If specified, it is the time of signature's creation.", + "issuedBy": "If specified, it holds information about an issuer of signing certificate or key (a person or entity who signed the signing certificate or key).", + "issuedTo": "If specified, it holds information about a subject of signing certificate or key (a person or entity who signed the image).", +} + +func (ImageSignature) SwaggerDoc() map[string]string { + return map_ImageSignature +} + +var map_ImageStream = map[string]string{ + "": "An ImageStream stores a mapping of tags to images, metadata overrides that are applied when images are tagged in a stream, and an optional reference to a container image repository on a registry. Users typically update the spec.tags field to point to external images which are imported from container registries using credentials in your namespace with the pull secret type, or to existing image stream tags and images which are immediately accessible for tagging or pulling. The history of images applied to a tag is visible in the status.tags field and any user who can view an image stream is allowed to tag that image into their own image streams. Access to pull images from the integrated registry is granted by having the \"get imagestreams/layers\" permission on a given image stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both spec and status for that tag to be removed. Image stream history is retained until an administrator runs the prune operation, which removes references that are no longer in use. To preserve a historical image, ensure there is a tag in spec pointing to that image by its digest.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec describes the desired state of this stream", + "status": "status describes the current state of this stream", +} + +func (ImageStream) SwaggerDoc() map[string]string { + return map_ImageStream +} + +var map_ImageStreamImage = map[string]string{ + "": "ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. User interfaces and regular users can use this resource to access the metadata details of a tagged image in the image stream history for viewing, since Image resources are not directly accessible to end users. A not found error will be returned if no such image is referenced by a tag within the ImageStream. 
Images are created when spec tags are set on an image stream that represent an image in an external registry, when pushing to the integrated registry, or when tagging an existing image from one image stream to another. The name of an image stream image is in the form \"<name>@<digest>\", where the digest is the content-addressable identifier for the image (sha256:xxxxx...). You can use ImageStreamImages as the from.kind of an image stream spec tag to reference an image exactly. The only operation supported on the imagestreamimage endpoint is retrieving the image.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"image":    "image associated with the ImageStream and image name.",
+}
+
+func (ImageStreamImage) SwaggerDoc() map[string]string {
+	return map_ImageStreamImage
+}
+
+var map_ImageStreamImport = map[string]string{
+	"":         "The image stream import resource provides an easy way for a user to find and import container images from other container image registries into the server. Individual images or an entire image repository may be imported, and users may choose to see the results of the import prior to tagging the resulting images into the specified image stream.\n\nThis API is intended for end-user tools that need to see the metadata of the image prior to import (for instance, to generate an application from it). Clients that know the desired image can continue to create spec.tags directly into their image streams.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "spec is a description of the images that the user wishes to import",
+	"status":   "status is the result of importing the image",
+}
+
+func (ImageStreamImport) SwaggerDoc() map[string]string {
+	return map_ImageStreamImport
+}
+
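Since clients routinely have to take an ImageStreamImage name apart into its stream and digest halves, here is a hedged sketch of that parsing; the helper name and the digest value are hypothetical, not part of the vendored API:

	package main

	import (
		"fmt"
		"strings"
	)

	// splitImageStreamImageName splits an ImageStreamImage name of the
	// form "<name>@<digest>" into the image stream name and the
	// content-addressable digest.
	func splitImageStreamImageName(name string) (stream, digest string, err error) {
		i := strings.LastIndex(name, "@")
		if i <= 0 || i == len(name)-1 {
			return "", "", fmt.Errorf("invalid ImageStreamImage name %q", name)
		}
		return name[:i], name[i+1:], nil
	}

	func main() {
		stream, digest, err := splitImageStreamImageName("ruby@sha256:0123456789abcdef")
		fmt.Println(stream, digest, err) // ruby sha256:0123456789abcdef <nil>
	}

+var map_ImageStreamImportSpec = map[string]string{
+	"":           "ImageStreamImportSpec defines what images should be imported.",
+	"import":     "import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.",
+	"repository": "repository is an optional import of an entire container image repository. 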
A maximum limit on the number of tags imported this way is imposed by the server.", + "images": "images are a list of individual images to import.", +} + +func (ImageStreamImportSpec) SwaggerDoc() map[string]string { + return map_ImageStreamImportSpec +} + +var map_ImageStreamImportStatus = map[string]string{ + "": "ImageStreamImportStatus contains information about the status of an image stream import.", + "import": "import is the image stream that was successfully updated or created when 'to' was set.", + "repository": "repository is set if spec.repository was set to the outcome of the import", + "images": "images is set with the result of importing spec.images", +} + +func (ImageStreamImportStatus) SwaggerDoc() map[string]string { + return map_ImageStreamImportStatus +} + +var map_ImageStreamLayers = map[string]string{ + "": "ImageStreamLayers describes information about the layers referenced by images in this image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "blobs": "blobs is a map of blob name to metadata about the blob.", + "images": "images is a map between an image name and the names of the blobs and config that comprise the image.", +} + +func (ImageStreamLayers) SwaggerDoc() map[string]string { + return map_ImageStreamLayers +} + +var map_ImageStreamList = map[string]string{ + "": "ImageStreamList is a list of ImageStream objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of imageStreams", +} + +func (ImageStreamList) SwaggerDoc() map[string]string { + return map_ImageStreamList +} + +var map_ImageStreamMapping = map[string]string{ + "": "ImageStreamMapping represents a mapping from a single image stream tag to a container image as well as the reference to the container image stream the image came from. This resource is used by privileged integrators to create an image resource and to associate it with an image stream in the status tags field. Creating an ImageStreamMapping will allow any user who can view the image stream to tag or pull that image, so only create mappings where the user has proven they have access to the image contents directly. The only operation supported for this resource is create and the metadata name and namespace should be set to the image stream containing the tag that should be updated.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "image": "image is a container image.", + "tag": "tag is a string value this image can be located with inside the stream.", +} + +func (ImageStreamMapping) SwaggerDoc() map[string]string { + return map_ImageStreamMapping +} + +var map_ImageStreamSpec = map[string]string{ + "": "ImageStreamSpec represents options for ImageStreams.", + "lookupPolicy": "lookupPolicy controls how other resources reference images within this namespace.", + "dockerImageRepository": "dockerImageRepository is optional, if specified this stream is backed by a container repository on this server Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.", + "tags": "tags map arbitrary string values to specific image locators", +} + +func (ImageStreamSpec) SwaggerDoc() map[string]string { + return map_ImageStreamSpec +} + +var map_ImageStreamStatus = map[string]string{ + "": "ImageStreamStatus contains information about the state of this image stream.", + "dockerImageRepository": "dockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located", + "publicDockerImageRepository": "publicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.", + "tags": "tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.", +} + +func (ImageStreamStatus) SwaggerDoc() map[string]string { + return map_ImageStreamStatus +} + +var map_ImageStreamTag = map[string]string{ + "": "ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. Use this resource to interact with the tags and images in an image stream by tag, or to see the image details for a particular tag. The image associated with this resource is the most recently successfully tagged, imported, or pushed image (as described in the image stream status.tags.items list for this tag). If an import is in progress or has failed the previous image will be shown. Deleting an image stream tag clears both the status and spec fields of an image stream. If no image can be retrieved for a given tag, a not found error will be returned.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "tag": "tag is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.", + "generation": "generation is the current generation of the tagged image - if tag is provided and this value is not equal to the tag generation, a user has requested an import that has not completed, or conditions will be filled out indicating any error.", + "lookupPolicy": "lookupPolicy indicates whether this tag will handle image references in this namespace.", + "conditions": "conditions is an array of conditions that apply to the image stream tag.", + "image": "image associated with the ImageStream and tag.", +} + +func (ImageStreamTag) SwaggerDoc() map[string]string { + return map_ImageStreamTag +} + +var map_ImageStreamTagList = map[string]string{ + "": "ImageStreamTagList is a list of ImageStreamTag objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of image stream tags", +} + +func (ImageStreamTagList) SwaggerDoc() map[string]string { + return map_ImageStreamTagList +} + +var map_ImageTag = map[string]string{ + "": "ImageTag represents a single tag within an image stream and includes the spec, the status history, and the currently referenced image (if any) of the provided tag. This type replaces the ImageStreamTag by providing a full view of the tag. ImageTags are returned for every spec or status tag present on the image stream. If no tag exists in either form, a not found error will be returned by the API. A create operation will succeed if no spec tag has already been defined and the spec field is set. Delete will remove both spec and status elements from the image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.", + "status": "status is the status tag details associated with this image stream tag, and it may be null if no push or import has been performed.", + "image": "image is the details of the most recent image stream status tag, and it may be null if import has not completed or an administrator has deleted the image object. To verify this is the most recent image, you must verify the generation of the most recent status.items entry matches the spec tag (if a spec tag is set). This field will not be set when listing image tags.", +} + +func (ImageTag) SwaggerDoc() map[string]string { + return map_ImageTag +} + +var map_ImageTagList = map[string]string{ + "": "ImageTagList is a list of ImageTag objects. When listing image tags, the image field is not populated. Tags are returned in alphabetical order by image stream and then tag.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items":    "items is the list of image stream tags",
+}
+
+func (ImageTagList) SwaggerDoc() map[string]string {
+	return map_ImageTagList
+}
+
+var map_NamedTagEventList = map[string]string{
+	"":           "NamedTagEventList relates a tag to its image history.",
+	"tag":        "tag is the tag for which the history is recorded",
+	"items":      "items is the list of tag events recorded for this tag, with the most recent event first.",
+	"conditions": "conditions is an array of conditions that apply to the tag event list.",
+}
+
+func (NamedTagEventList) SwaggerDoc() map[string]string {
+	return map_NamedTagEventList
+}
+
+var map_RepositoryImportSpec = map[string]string{
+	"":                "RepositoryImportSpec describes a request to import images from a container image repository.",
+	"from":            "from is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed",
+	"importPolicy":    "importPolicy is the policy controlling how the image is imported",
+	"referencePolicy": "referencePolicy defines how other components should consume the image",
+	"includeManifest": "includeManifest determines if the manifest for each image is returned in the response",
+}
+
+func (RepositoryImportSpec) SwaggerDoc() map[string]string {
+	return map_RepositoryImportSpec
+}
+
+var map_RepositoryImportStatus = map[string]string{
+	"":               "RepositoryImportStatus describes the result of an image repository import",
+	"status":         "status reflects whether any failure occurred during import",
+	"images":         "images is a list of images successfully retrieved by the import of the repository.",
+	"additionalTags": "additionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.",
+}
+
+func (RepositoryImportStatus) SwaggerDoc() map[string]string {
+	return map_RepositoryImportStatus
+}
+
+var map_SignatureCondition = map[string]string{
+	"":                   "SignatureCondition describes an image signature condition of particular kind at particular probe time.",
+	"type":               "type of signature condition, Complete or Failed.",
+	"status":             "status of the condition, one of True, False, Unknown.",
+	"lastProbeTime":      "Last time the condition was checked.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "(brief) reason for the condition's last transition.",
+	"message":            "Human readable message indicating details about last transition.",
+}
+
+func (SignatureCondition) SwaggerDoc() map[string]string {
+	return map_SignatureCondition
+}
+
+var map_SignatureGenericEntity = map[string]string{
+	"":             "SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject of signing certificate or key.",
+	"organization": "organization name.",
+	"commonName":   "Common name (e.g. openshift-signing-service).",
+}
+
+func (SignatureGenericEntity) SwaggerDoc() map[string]string {
+	return map_SignatureGenericEntity
+}
+
+var map_SignatureIssuer = map[string]string{
+	"": "SignatureIssuer holds information about an issuer of signing certificate or key.",
+}
+
+func (SignatureIssuer) SwaggerDoc() map[string]string {
+	return map_SignatureIssuer
+}
+
+var map_SignatureSubject = map[string]string{
+	"":            "SignatureSubject holds information about a person or entity who created the signature.",
+	"publicKeyID": "If present, it is a human readable key id of public key belonging to the subject used to verify image signature. 
It should contain at least 64 lowest bits of public key's fingerprint (e.g. 0x685ebe62bf278440).", +} + +func (SignatureSubject) SwaggerDoc() map[string]string { + return map_SignatureSubject +} + +var map_TagEvent = map[string]string{ + "": "TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.", + "created": "created holds the time the TagEvent was created", + "dockerImageReference": "dockerImageReference is the string that can be used to pull this image", + "image": "image is the image", + "generation": "generation is the spec tag generation that resulted in this tag being updated", +} + +func (TagEvent) SwaggerDoc() map[string]string { + return map_TagEvent +} + +var map_TagEventCondition = map[string]string{ + "": "TagEventCondition contains condition information for a tag event.", + "type": "type of tag event condition, currently only ImportSuccess", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", + "reason": "reason is a brief machine readable explanation for the condition's last transition.", + "message": "message is a human readable description of the details about last transition, complementing reason.", + "generation": "generation is the spec tag generation that this status corresponds to", +} + +func (TagEventCondition) SwaggerDoc() map[string]string { + return map_TagEventCondition +} + +var map_TagImportPolicy = map[string]string{ + "": "TagImportPolicy controls how images related to this tag will be imported.", + "insecure": "insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.", + "scheduled": "scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported", + "importMode": "importMode describes how to import an image manifest.", +} + +func (TagImportPolicy) SwaggerDoc() map[string]string { + return map_TagImportPolicy +} + +var map_TagReference = map[string]string{ + "": "TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.", + "name": "name of the tag", + "annotations": "Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.", + "from": "Optional; if specified, a reference to another image that this tag should point to. Valid values are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references can only reference a tag within this same ImageStream.", + "reference": "reference states if the tag will be imported. Default value is false, which means the tag will be imported.", + "generation": "generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. 
Legacy clients will send this value as nil which will be merged with the current tag generation.",
+	"importPolicy":    "importPolicy is information that controls how images may be imported by the server.",
+	"referencePolicy": "referencePolicy defines how other components should consume the image.",
+}
+
+func (TagReference) SwaggerDoc() map[string]string {
+	return map_TagReference
+}
+
+var map_TagReferencePolicy = map[string]string{
+	"":     "TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when image change triggers in deployment configs or builds are resolved. This allows the image stream author to control how images are accessed.",
+	"type": "type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. It also allows the image layers to be mirrored into the local registry, so the images can still be pulled even if the upstream registry is unavailable.",
+}
+
+func (TagReferencePolicy) SwaggerDoc() map[string]string {
+	return map_TagReferencePolicy
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
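As the file header above notes, these maps feed go-restful's Swagger generation, but nothing stops a client from reading them directly: every generated type exposes a SwaggerDoc() map whose "" key holds the type-level description and whose other keys document individual fields. A minimal sketch, assuming the vendored import path used elsewhere in this diff:

	package main

	import (
		"fmt"

		imagev1 "github.com/openshift/api/image/v1"
	)

	func main() {
		docs := imagev1.TagReferencePolicy{}.SwaggerDoc()
		fmt.Println(docs[""])     // type-level description
		fmt.Println(docs["type"]) // documentation for the `type` field
	}

diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/Makefile b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/Makefile
new file mode 100644
index 000000000..77f5d3409
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+	make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="operator.openshift.io/v1"
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/doc.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/doc.go
new file mode 100644
index 000000000..3de961a7f
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=operator.openshift.io
+package v1
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/register.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/register.go
new file mode 100644
index 000000000..5920c4fca
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/register.go
@@ -0,0 +1,82 @@
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	GroupName     = "operator.openshift.io"
+	GroupVersion  = schema.GroupVersion{Group: GroupName, Version: "v1"}
+	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install)
+	// Install is a function which adds this version to a scheme
+	Install = schemeBuilder.AddToScheme
+
+	// SchemeGroupVersion generated code relies on this name
+	// Deprecated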
+	SchemeGroupVersion = GroupVersion
+	// AddToScheme exists solely to keep the old generators creating valid code
+	// DEPRECATED
+	AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	metav1.AddToGroupVersion(scheme, GroupVersion)
+
+	scheme.AddKnownTypes(GroupVersion,
+		&Authentication{},
+		&AuthenticationList{},
+		&DNS{},
+		&DNSList{},
+		&CloudCredential{},
+		&CloudCredentialList{},
+		&ClusterCSIDriver{},
+		&ClusterCSIDriverList{},
+		&Console{},
+		&ConsoleList{},
+		&CSISnapshotController{},
+		&CSISnapshotControllerList{},
+		&Etcd{},
+		&EtcdList{},
+		&KubeAPIServer{},
+		&KubeAPIServerList{},
+		&KubeControllerManager{},
+		&KubeControllerManagerList{},
+		&KubeScheduler{},
+		&KubeSchedulerList{},
+		&KubeStorageVersionMigrator{},
+		&KubeStorageVersionMigratorList{},
+		&MachineConfiguration{},
+		&MachineConfigurationList{},
+		&Network{},
+		&NetworkList{},
+		&OpenShiftAPIServer{},
+		&OpenShiftAPIServerList{},
+		&OpenShiftControllerManager{},
+		&OpenShiftControllerManagerList{},
+		&OLM{},
+		&OLMList{},
+		&ServiceCA{},
+		&ServiceCAList{},
+		&ServiceCatalogAPIServer{},
+		&ServiceCatalogAPIServerList{},
+		&ServiceCatalogControllerManager{},
+		&ServiceCatalogControllerManagerList{},
+		&IngressController{},
+		&IngressControllerList{},
+		&InsightsOperator{},
+		&InsightsOperatorList{},
+		&Storage{},
+		&StorageList{},
+	)
+
+	return nil
+}
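A short sketch of how a client wires this registration into a runtime.Scheme; the main package is illustrative, while Install and GroupVersion are the symbols defined in register.go above:

	package main

	import (
		"fmt"

		operatorv1 "github.com/openshift/api/operator/v1"
		"k8s.io/apimachinery/pkg/runtime"
	)

	func main() {
		// Install runs addKnownTypes (plus configv1.Install) against the
		// scheme, registering every operator.openshift.io/v1 type listed above.
		scheme := runtime.NewScheme()
		if err := operatorv1.Install(scheme); err != nil {
			panic(err)
		}
		fmt.Println(scheme.Recognizes(operatorv1.GroupVersion.WithKind("Authentication"))) // true
	}

diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types.go
new file mode 100644
index 000000000..3a2141abb
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types.go
@@ -0,0 +1,292 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// MyOperatorResource is an example operator configuration type
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:internal
+type MyOperatorResource struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata"`
+
+	// +required
+	Spec   MyOperatorResourceSpec   `json:"spec"`
+	Status MyOperatorResourceStatus `json:"status"`
+}
+
+type MyOperatorResourceSpec struct {
+	OperatorSpec `json:",inline"`
+}
+
+type MyOperatorResourceStatus struct {
+	OperatorStatus `json:",inline"`
+}
+
+// +kubebuilder:validation:Pattern=`^(Managed|Unmanaged|Force|Removed)$`
+type ManagementState string
+
+var (
+	// Force means that the operator is actively managing its resources but will not block an upgrade
+	// if unmet prereqs exist. This state puts the operator at risk for unsuccessful upgrades
+	Force ManagementState = "Force"
+	// Managed means that the operator is actively managing its resources and trying to keep the component active.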
+ // It will only upgrade the component if it is safe to do so + Managed ManagementState = "Managed" + // Unmanaged means that the operator will not take any action related to the component + // Some operators might not support this management state as it might damage the cluster and lead to manual recovery. + Unmanaged ManagementState = "Unmanaged" + // Removed means that the operator is actively managing its resources and trying to remove all traces of the component + // Some operators (like kube-apiserver-operator) might not support this management state as removing the API server will + // brick the cluster. + Removed ManagementState = "Removed" +) + +// OperatorSpec contains common fields operators need. It is intended to be anonymous included +// inside of the Spec struct for your particular operator. +type OperatorSpec struct { + // managementState indicates whether and how the operator should manage the component + ManagementState ManagementState `json:"managementState"` + + // logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for their operands. + // + // Valid values are: "Normal", "Debug", "Trace", "TraceAll". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + LogLevel LogLevel `json:"logLevel,omitempty"` + + // operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for themselves. + // + // Valid values are: "Normal", "Debug", "Trace", "TraceAll". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + OperatorLogLevel LogLevel `json:"operatorLogLevel,omitempty"` + + // unsupportedConfigOverrides overrides the final configuration that was computed by the operator. + // Red Hat does not support the use of this field. + // Misuse of this field could lead to unexpected behavior or conflict with other configuration options. + // Seek guidance from the Red Hat support before using this field. + // Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"` + + // observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because + // it is an input to the level for the operator + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + ObservedConfig runtime.RawExtension `json:"observedConfig"` +} + +// +kubebuilder:validation:Enum="";Normal;Debug;Trace;TraceAll +type LogLevel string + +var ( + // Normal is the default. Normal, working log information, everything is fine, but helpful notices for auditing or common operations. In kube, this is probably glog=2. + Normal LogLevel = "Normal" + + // Debug is used when something went wrong. Even common operations may be logged, and less helpful but more quantity of notices. In kube, this is probably glog=4. + Debug LogLevel = "Debug" + + // Trace is used when something went really badly and even more verbose logs are needed. Logging every function call as part of a common operation, to tracing execution of a query. In kube, this is probably glog=6. 
+	Trace LogLevel = "Trace"
+
+	// TraceAll is used when something is broken at the level of API content/decoding. It will dump complete body content. If you turn this on in a production cluster
+	// prepare for serious performance issues and massive amounts of logs. In kube, this is probably glog=8.
+	TraceAll LogLevel = "TraceAll"
+)
+
+type OperatorStatus struct {
+	// observedGeneration is the last generation change you've dealt with
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// conditions is a list of conditions and their status
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []OperatorCondition `json:"conditions,omitempty"`
+
+	// version is the level this availability applies to
+	// +optional
+	Version string `json:"version,omitempty"`
+
+	// readyReplicas indicates how many replicas are ready and at the desired state
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas"`
+
+	// latestAvailableRevision is the deploymentID of the most recent deployment
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self >= oldSelf",message="must only increase"
+	LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"`
+
+	// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction.
+	// +listType=map
+	// +listMapKey=group
+	// +listMapKey=resource
+	// +listMapKey=namespace
+	// +listMapKey=name
+	// +optional
+	Generations []GenerationStatus `json:"generations,omitempty"`
+}
+
+// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.
+type GenerationStatus struct {
+	// group is the group of the thing you're tracking
+	// +required
+	Group string `json:"group"`
+
+	// resource is the resource type of the thing you're tracking
+	// +required
+	Resource string `json:"resource"`
+
+	// namespace is where the thing you're tracking is
+	// +required
+	Namespace string `json:"namespace"`
+
+	// name is the name of the thing you're tracking
+	// +required
+	Name string `json:"name"`
+
+	// TODO: Add validation for lastGeneration. The value for this field should generally increase, except when the associated
+	// resource has been deleted and re-created. To accurately validate this field, we should introduce a new UID field and only
+	// enforce an increasing value in lastGeneration when the UID remains unchanged. A change in the UID indicates that the resource
+	// was re-created, allowing the lastGeneration value to reset or decrease.
+
+	// lastGeneration is the last generation of the workload controller involved
+	LastGeneration int64 `json:"lastGeneration"`
+
+	// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps
+	Hash string `json:"hash"`
+}
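A hedged sketch of reading the conditions list declared above; the isConditionTrue helper is hypothetical, while the condition type names are the OperatorStatusType values defined just below:

	package main

	import (
		"fmt"

		operatorv1 "github.com/openshift/api/operator/v1"
	)

	// isConditionTrue scans an OperatorStatus conditions list for a
	// condition of the given type and reports whether its status is True.
	func isConditionTrue(status *operatorv1.OperatorStatus, condType string) bool {
		for _, c := range status.Conditions {
			if c.Type == condType {
				return c.Status == operatorv1.ConditionTrue
			}
		}
		return false
	}

	func main() {
		st := &operatorv1.OperatorStatus{
			Conditions: []operatorv1.OperatorCondition{
				{Type: operatorv1.OperatorStatusTypeAvailable, Status: operatorv1.ConditionTrue},
			},
		}
		fmt.Println(isConditionTrue(st, operatorv1.OperatorStatusTypeAvailable)) // true
	}

+
+var (
+	// Available indicates that the operand is present and accessible in the cluster
+	OperatorStatusTypeAvailable = "Available"
+	// Progressing indicates that the operator is trying to transition the operand to a different state
+	OperatorStatusTypeProgressing = "Progressing"
+	// Degraded indicates that the operator (not the operand) is unable to fulfill the user intent
+	OperatorStatusTypeDegraded = "Degraded"
+	// PrereqsSatisfied indicates that the things this operator depends on are present and at levels compatible with the
+	// current and desired states.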
+ OperatorStatusTypePrereqsSatisfied = "PrereqsSatisfied" + // Upgradeable indicates that the operator configuration itself (not prereqs) can be auto-upgraded by the CVO + OperatorStatusTypeUpgradeable = "Upgradeable" +) + +// OperatorCondition is just the standard condition fields. +type OperatorCondition struct { + // type of condition in CamelCase or in foo.example.com/CamelCase. + // --- + // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + // useful (see .node.status.conditions), the ability to deconflict is important. + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + + // status of the condition, one of True, False, Unknown. + // +required + // +kubebuilder:validation:Enum=True;False;Unknown + Status ConditionStatus `json:"status"` + + // lastTransitionTime is the last time the condition transitioned from one status to another. + // This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + // +required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + + Reason string `json:"reason,omitempty"` + + Message string `json:"message,omitempty"` +} + +type ConditionStatus string + +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// StaticPodOperatorSpec is spec for controllers that manage static pods. +type StaticPodOperatorSpec struct { + OperatorSpec `json:",inline"` + + // forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + // this time instead of failing again on the same config. + ForceRedeploymentReason string `json:"forceRedeploymentReason"` + + // failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api + // -1 = unlimited, 0 or unset = 5 (default) + FailedRevisionLimit int32 `json:"failedRevisionLimit,omitempty"` + // succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api + // -1 = unlimited, 0 or unset = 5 (default) + SucceededRevisionLimit int32 `json:"succeededRevisionLimit,omitempty"` +} + +// StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual +// node status must be tracked. 
+type StaticPodOperatorStatus struct {
+	OperatorStatus `json:",inline"`
+
+	// latestAvailableRevisionReason describes the detailed reason for the most recent deployment
+	// +optional
+	LatestAvailableRevisionReason string `json:"latestAvailableRevisionReason,omitempty"`
+
+	// nodeStatuses track the deployment values and errors across individual nodes
+	// +listType=map
+	// +listMapKey=nodeName
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="size(self.filter(status, status.?targetRevision.orValue(0) != 0)) <= 1",message="no more than 1 node status may have a nonzero targetRevision"
+	NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"`
+}
+
+// NodeStatus provides information about the current state of a particular node managed by this operator.
+// +kubebuilder:validation:XValidation:rule="has(self.currentRevision) || !has(oldSelf.currentRevision)",message="cannot be unset once set",fieldPath=".currentRevision"
+// +kubebuilder:validation:XValidation:rule="oldSelf.hasValue() || !has(self.currentRevision)",message="currentRevision can not be set on creation of a nodeStatus",optionalOldSelf=true,fieldPath=.currentRevision
+// +kubebuilder:validation:XValidation:rule="oldSelf.hasValue() || !has(self.targetRevision)",message="targetRevision can not be set on creation of a nodeStatus",optionalOldSelf=true,fieldPath=.targetRevision
+type NodeStatus struct {
+	// nodeName is the name of the node
+	// +required
+	NodeName string `json:"nodeName"`
+
+	// currentRevision is the generation of the most recently successful deployment.
+	// Can not be set on creation of a nodeStatus. Updates must only increase the value.
+	// +kubebuilder:validation:XValidation:rule="self >= oldSelf",message="must only increase"
+	// +optional
+	CurrentRevision int32 `json:"currentRevision,omitempty"`
+	// targetRevision is the generation of the deployment we're trying to apply.
+	// Can not be set on creation of a nodeStatus.
+	// +optional
+	TargetRevision int32 `json:"targetRevision,omitempty"`
+
+	// lastFailedRevision is the generation of the deployment we tried and failed to deploy.
+	LastFailedRevision int32 `json:"lastFailedRevision,omitempty"`
+	// lastFailedTime is the time the last failed revision most recently failed.
+	LastFailedTime *metav1.Time `json:"lastFailedTime,omitempty"`
+	// lastFailedReason is a machine readable failure reason string.
+	LastFailedReason string `json:"lastFailedReason,omitempty"`
+	// lastFailedCount is how often the installer pod of the last failed revision failed.
+	LastFailedCount int `json:"lastFailedCount,omitempty"`
+	// lastFallbackCount is how often a fallback to a previous revision happened.
+	LastFallbackCount int `json:"lastFallbackCount,omitempty"`
+	// lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. 
+ // +listType=atomic + LastFailedRevisionErrors []string `json:"lastFailedRevisionErrors,omitempty"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_authentication.go new file mode 100644 index 000000000..7cc22d1e4 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_authentication.go @@ -0,0 +1,68 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=authentications,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475 +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=authentication,operatorOrdering=01 +// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true + +// Authentication provides information to configure an operator to manage authentication. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Authentication struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +required + Spec AuthenticationSpec `json:"spec"` + // +optional + Status AuthenticationStatus `json:"status,omitempty"` +} + +type AuthenticationSpec struct { + OperatorSpec `json:",inline"` +} + +type AuthenticationStatus struct { + // oauthAPIServer holds status specific only to oauth-apiserver + // +optional + OAuthAPIServer OAuthAPIServerStatus `json:"oauthAPIServer,omitempty"` + + OperatorStatus `json:",inline"` +} + +type OAuthAPIServerStatus struct { + // latestAvailableRevision is the latest revision used as suffix of revisioned + // secrets like encryption-config. A new revision causes a new deployment of pods. + // +optional + // +kubebuilder:validation:Minimum=0 + LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuthenticationList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type AuthenticationList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Authentication `json:"items"`
+}
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go
new file mode 100644
index 000000000..b6ef52e93
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go
@@ -0,0 +1,92 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=cloudcredentials,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/692
+// +openshift:capability=CloudCredential
+// +openshift:file-pattern=cvoRunLevel=0000_40,operatorName=cloud-credential,operatorOrdering=00
+
+// CloudCredential provides a means to configure an operator to manage CredentialsRequests.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type CloudCredential struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// +required
+	Spec CloudCredentialSpec `json:"spec"`
+	// +optional
+	Status CloudCredentialStatus `json:"status"`
+}
+
+// CloudCredentialsMode is the mode in which the cloud-credential-operator
+// should reconcile CredentialsRequests.
+// +kubebuilder:validation:Enum="";Manual;Mint;Passthrough
+type CloudCredentialsMode string
+
+const (
+	// CloudCredentialsModeManual tells cloud-credential-operator to not reconcile any CredentialsRequests
+	// (primarily used for the disconnected VPC use-cases).
+	CloudCredentialsModeManual CloudCredentialsMode = "Manual"
+
+	// CloudCredentialsModeMint tells cloud-credential-operator to reconcile all CredentialsRequests
+	// by minting new users/credentials.
+	CloudCredentialsModeMint CloudCredentialsMode = "Mint"
+
+	// CloudCredentialsModePassthrough tells cloud-credential-operator to reconcile all CredentialsRequests
+	// by copying the cloud-specific secret data.
+	CloudCredentialsModePassthrough CloudCredentialsMode = "Passthrough"
+
+	// CloudCredentialsModeDefault puts CCO into the default mode of operation (per-cloud/platform defaults):
+	// AWS/Azure/GCP: dynamically determine the cluster's cloud credential capabilities to affect
+	// processing of CredentialsRequests
+	// All other clouds/platforms (OpenStack, oVirt, vSphere, etc): run in "passthrough" mode
+	CloudCredentialsModeDefault CloudCredentialsMode = ""
+)
+
+// CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator.
+type CloudCredentialSpec struct {
+	OperatorSpec `json:",inline"`
+	// credentialsMode allows informing CCO that it should not attempt to dynamically
+	// determine the root cloud credentials capabilities, and it should just run in
+	// the specified mode.
+	// It also allows putting the operator into "manual" mode if desired.
+	// Leaving the field in default mode runs CCO so that the cluster's cloud credentials
+	// will be dynamically probed for capabilities (on supported clouds/platforms).
+	// Supported modes:
+	// AWS/Azure/GCP: "" (Default), "Mint", "Passthrough", "Manual"
+	// Others: Do not set a value, as other platforms only support running in "Passthrough"
+	// +optional
+	CredentialsMode CloudCredentialsMode `json:"credentialsMode,omitempty"`
+}
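A hedged sketch of pinning CCO to manual mode; the singleton name "cluster" is an assumption here, not something this file mandates.

// Illustrative only: force manual credential management, e.g. for
// clusters whose cloud credentials must not be minted automatically.
var _ = CloudCredential{
	ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
	Spec:       CloudCredentialSpec{CredentialsMode: CloudCredentialsModeManual},
}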
+
+// CloudCredentialStatus defines the observed status of the cloud-credential-operator.
+type CloudCredentialStatus struct {
+	OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type CloudCredentialList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []CloudCredential `json:"items"`
+}
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_config.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_config.go
new file mode 100644
index 000000000..f0d190e6d
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_config.go
@@ -0,0 +1,60 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=configs,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/612
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+
+// Config specifies the behavior of the config operator, which is responsible for creating the initial configuration of other components
+// on the cluster. The operator also handles installation, migration and synchronization of cloud configurations for AWS and Azure cloud-based clusters.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Config struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec is the specification of the desired behavior of the Config Operator.
+	// +required
+	Spec ConfigSpec `json:"spec"`
+
+	// status defines the observed status of the Config Operator.
+	// +optional
+	Status ConfigStatus `json:"status"`
+}
+
+type ConfigSpec struct {
+	OperatorSpec `json:",inline"`
+}
+
+type ConfigStatus struct {
+	OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ConfigList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	// items contains the items
+	Items []Config `json:"items"`
+}
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_console.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_console.go
new file mode 100644
index 000000000..e030a65c8
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_console.go
@@ -0,0 +1,599 @@
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	authorizationv1 "k8s.io/api/authorization/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=consoles,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/486
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=console,operatorOrdering=01
+
+// Console provides a means to configure an operator to manage the console.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Console struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// +required
+	Spec ConsoleSpec `json:"spec"`
+	// +optional
+	Status ConsoleStatus `json:"status,omitempty"`
+}
+
+// ConsoleSpec is the specification of the desired behavior of the Console.
+type ConsoleSpec struct {
+	OperatorSpec `json:",inline"`
+	// customization is used to optionally provide a small set of
+	// customization options to the web console.
+	// +optional
+	Customization ConsoleCustomization `json:"customization"`
+	// providers contains configuration for using specific service providers.
+	Providers ConsoleProviders `json:"providers"`
+	// route contains the hostname and secret reference that contains the serving certificate.
+	// If a custom route is specified, a new route will be created with the
+	// provided hostname, under which console will be available.
+	// If the custom hostname uses the default routing suffix of the cluster,
+	// the Secret specification for a serving certificate will not be needed.
+	// If the custom hostname points to an arbitrary domain, manual DNS configuration steps are necessary.
+	// The default console route will be maintained to reserve the default hostname
+	// for console if the custom route is removed.
+	// If not specified, the default route will be used.
+	// DEPRECATED
+	// +optional
+	Route ConsoleConfigRoute `json:"route"`
+	// plugins defines a list of enabled console plugin names.
+	// +optional
+	Plugins []string `json:"plugins,omitempty"`
+	// ingress allows configuring an alternative ingress for the console.
+	// This field is intended for clusters without ingress capability,
+	// where access to routes is not possible.
+	// +optional
+	Ingress Ingress `json:"ingress"`
+}
+
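A small sketch of the spec fields above; the plugin names and hostname are assumptions for illustration.

// Illustrative only: enable two console plugins by name and point the
// console at an alternative ingress URL (names/URL are made up).
var _ = ConsoleSpec{
	Plugins: []string{"monitoring-plugin", "example-company-plugin"},
	Ingress: Ingress{ConsoleURL: "https://console.apps.example.com"},
}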
+// ConsoleConfigRoute holds information on external route access to console.
+// DEPRECATED
+type ConsoleConfigRoute struct {
+	// hostname is the desired custom domain under which console will be available.
+	Hostname string `json:"hostname"`
+	// secret points to a secret in the openshift-config namespace that contains a custom
+	// certificate and key and needs to be created manually by the cluster admin.
+	// The referenced Secret is required to contain the following key/value pairs:
+	// - "tls.crt" - specifies the custom certificate
+	// - "tls.key" - specifies the private key of the custom certificate
+	// If the custom hostname uses the default routing suffix of the cluster,
+	// the Secret specification for a serving certificate will not be needed.
+	// +optional
+	Secret configv1.SecretNameReference `json:"secret"`
+}
+
+// ConsoleStatus defines the observed status of the Console.
+type ConsoleStatus struct {
+	OperatorStatus `json:",inline"`
+}
+
+// ConsoleProviders defines a list of optional additional providers of
+// functionality to the console.
+type ConsoleProviders struct {
+	// statuspage contains the ID for the statuspage.io page that provides status info about the console.
+	// +optional
+	Statuspage *StatuspageProvider `json:"statuspage,omitempty"`
+}
+
+// StatuspageProvider provides identity for statuspage account.
+type StatuspageProvider struct {
+	// pageID is the unique ID assigned by Statuspage for your page. This must be a public page.
+	PageID string `json:"pageID"`
+}
+
+// ConsoleCapabilityName defines the name of a UI capability in the console UI.
+type ConsoleCapabilityName string
+
+const (
+	// lightspeedButton is the name for the Lightspeed button HTML element.
+	LightspeedButton ConsoleCapabilityName = "LightspeedButton"
+
+	// gettingStartedBanner is the name of the 'Getting started resources' banner in the console UI Overview page.
+	GettingStartedBanner ConsoleCapabilityName = "GettingStartedBanner"
+)
+
+// CapabilityState defines the state of the capability in the console UI.
+type CapabilityState string
+
+const (
+	// "Enabled" means that the capability will be rendered in the console UI.
+	CapabilityEnabled CapabilityState = "Enabled"
+	// "Disabled" means that the capability will not be rendered in the console UI.
+	CapabilityDisabled CapabilityState = "Disabled"
+)
+
+// CapabilityVisibility defines the criteria to enable/disable a capability.
+// +union
+type CapabilityVisibility struct {
+	// state defines if the capability is enabled or disabled in the console UI.
+	// Enabling the capability in the console UI is represented by the "Enabled" value.
+	// Disabling the capability in the console UI is represented by the "Disabled" value.
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="Enabled";"Disabled"
+	// +required
+	State CapabilityState `json:"state"`
+}
+
+// Capability defines a UI capability and its state in the console UI.
+type Capability struct {
+	// name is the unique name of a capability.
+	// Available capabilities are LightspeedButton and GettingStartedBanner.
+	// +kubebuilder:validation:Enum:="LightspeedButton";"GettingStartedBanner"
+	// +required
+	Name ConsoleCapabilityName `json:"name"`
+	// visibility defines the visibility state of the capability.
+	// +required
+	Visibility CapabilityVisibility `json:"visibility"`
+}
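A sketch of the capability union in use; state acts as the discriminator, so no extra fields accompany Enabled/Disabled.

// Illustrative only: hide the Lightspeed button, keep the
// getting-started banner; each name may appear at most once.
var _ = []Capability{
	{Name: LightspeedButton, Visibility: CapabilityVisibility{State: CapabilityDisabled}},
	{Name: GettingStartedBanner, Visibility: CapabilityVisibility{State: CapabilityEnabled}},
}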
+
+// ThemeMode is the value of the logo theme mode that determines the theme mode in the console UI.
+// +kubebuilder:validation:Enum="Dark";"Light"
+// +enum
+type ThemeMode string
+
+// ThemeMode values
+const (
+	// ThemeModeDark represents the dark mode for a console theme.
+	ThemeModeDark ThemeMode = "Dark"
+
+	// ThemeModeLight represents the light mode for a console theme.
+	ThemeModeLight ThemeMode = "Light"
+)
+
+// LogoType is the value of the logo type that determines if the logo is for the masthead or the favicon in the console UI.
+// The masthead logo is displayed in the masthead and about modal of the console UI.
+// +kubebuilder:validation:Enum="Masthead";"Favicon"
+// +enum
+type LogoType string
+
+const (
+	// Masthead represents the logo in the masthead.
+	LogoTypeMasthead LogoType = "Masthead"
+
+	// Favicon represents the favicon logo.
+	LogoTypeFavicon LogoType = "Favicon"
+)
+
+// SourceType defines the source type of the file reference.
+// +kubebuilder:validation:Enum="ConfigMap"
+// +enum
+type SourceType string
+
+const (
+	// SourceTypeConfigMap represents a ConfigMap source.
+	SourceTypeConfigMap SourceType = "ConfigMap"
+)
+
+// ConfigMapFileReference references a specific file within a ConfigMap.
+type ConfigMapFileReference struct {
+	// name is the name of the ConfigMap.
+	// name is a required field.
+	// Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character.
+	// Must be at most 253 characters in length.
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character."
+	// +required
+	Name string `json:"name"`
+
+	// key is the logo key inside the referenced ConfigMap.
+	// Must consist only of alphanumeric characters, dashes (-), underscores (_), and periods (.).
+	// Must be at most 253 characters in length.
+	// Must end in a valid file extension.
+	// A valid file extension must consist of a period followed by 2 to 5 alpha characters.
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:XValidation:rule="self.matches('^[a-zA-Z0-9._-]+$')",message="The ConfigMap key must consist only of alphanumeric characters, dashes (-), underscores (_), and periods (.)."
+	// +kubebuilder:validation:XValidation:rule="self.matches('.*\\\\.[a-zA-Z]{2,5}$')",message="The ConfigMap key must end with a valid file extension (2 to 5 letters)."
+	// +required
+	Key string `json:"key"`
+}
+
+// FileReferenceSource is used by the console to locate the specified file containing a custom logo.
+// +kubebuilder:validation:XValidation:rule="has(self.from) && self.from == 'ConfigMap' ? has(self.configMap) : !has(self.configMap)",message="configMap is required when from is 'ConfigMap', and forbidden otherwise."
+type FileReferenceSource struct {
+	// from is a required field to specify the source type of the file reference.
+	// Allowed values are ConfigMap.
+	// When set to ConfigMap, the file will be sourced from a ConfigMap in the openshift-config namespace. The configMap field must be set when from is set to ConfigMap.
+	// +required
+	From SourceType `json:"from"`
+
+	// configMap specifies the ConfigMap sourcing details such as the name of the ConfigMap and the key for the file.
+	// The ConfigMap must exist in the openshift-config namespace.
+	// Required when from is "ConfigMap", and forbidden otherwise.
+	// +optional
+	ConfigMap *ConfigMapFileReference `json:"configMap"`
+}
+
+// Theme defines a theme mode for the console UI.
+type Theme struct {
+	// mode is used to specify what theme mode a logo will apply to in the console UI.
+ // mode is a required field that allows values of Dark and Light. + // When set to Dark, the logo file referenced in the 'file' field will be used when an end-user of the console UI enables the Dark mode. + // When set to Light, the logo file referenced in the 'file' field will be used when an end-user of the console UI enables the Light mode. + // +required + Mode ThemeMode `json:"mode"` + + // source is used by the console to locate the specified file containing a custom logo. + // source is a required field that references a ConfigMap name and key that contains the custom logo file in the openshift-config namespace. + // You can create it with a command like: + // - 'oc create configmap custom-logos-config --namespace=openshift-config --from-file=/path/to/file' + // The ConfigMap key must include the file extension so that the console serves the file with the correct MIME type. + // The recommended file format for the Masthead and Favicon logos is SVG, but other file formats are allowed if supported by the browser. + // The logo image size must be less than 1 MB due to constraints on the ConfigMap size. + // For more information, see the documentation: https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/web_console/customizing-web-console#customizing-web-console + // +required + Source FileReferenceSource `json:"source"` +} + +// Logo defines a configuration based on theme modes for the console UI logo. +type Logo struct { + // type specifies the type of the logo for the console UI. It determines whether the logo is for the masthead or favicon. + // type is a required field that allows values of Masthead and Favicon. + // When set to "Masthead", the logo will be used in the masthead and about modal of the console UI. + // When set to "Favicon", the logo will be used as the favicon of the console UI. + // +required + Type LogoType `json:"type"` + + // themes specifies the themes for the console UI logo. + // themes is a required field that allows a list of themes. Each item in the themes list must have a unique mode and a source field. + // Each mode determines whether the logo is for the dark or light mode of the console UI. + // If a theme is not specified, the default OpenShift logo will be displayed for that theme. + // There must be at least one entry and no more than 2 entries. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=2 + // +listType=map + // +listMapKey=mode + // +required + Themes []Theme `json:"themes"` +} + +// ConsoleCustomization defines a list of optional configuration for the console UI. +// Ensure that Logos and CustomLogoFile cannot be set at the same time. +// +kubebuilder:validation:XValidation:rule="!(has(self.logos) && has(self.customLogoFile))",message="Only one of logos or customLogoFile can be set." +type ConsoleCustomization struct { + // logos is used to replace the OpenShift Masthead and Favicon logos in the console UI with custom logos. + // logos is an optional field that allows a list of logos. + // Only one of logos or customLogoFile can be set at a time. + // If logos is set, customLogoFile must be unset. + // When specified, there must be at least one entry and no more than 2 entries. + // Each type must appear only once in the list. + // +kubebuilder:validation:MaxItems=2 + // +listType=map + // +listMapKey=type + // +optional + Logos []Logo `json:"logos"` + + // capabilities defines an array of capabilities that can be interacted with in the console UI. 
+	// Each capability defines a visual state that the console renders in the UI and that can be interacted with.
+	// Available capabilities are LightspeedButton and GettingStartedBanner.
+	// Each of the available capabilities may appear only once in the list.
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=2
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	Capabilities []Capability `json:"capabilities,omitempty"`
+	// brand is the default branding of the web console, which can be overridden by
+	// providing the brand field. There is a limited set of specific brand options.
+	// This field controls elements of the console such as the logo.
+	// An invalid value will prevent a console rollout.
+	// +kubebuilder:validation:Enum:=openshift;okd;online;ocp;dedicated;azure;OpenShift;OKD;Online;OCP;Dedicated;Azure;ROSA
+	Brand Brand `json:"brand,omitempty"`
+	// documentationBaseURL links to external documentation that is shown in various sections
+	// of the web console. Providing documentationBaseURL will override the default
+	// documentation URL.
+	// An invalid value will prevent a console rollout.
+	// +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$`
+	DocumentationBaseURL string `json:"documentationBaseURL,omitempty"`
+	// customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog
+	// instead of the normal OpenShift product name.
+	// +optional
+	CustomProductName string `json:"customProductName,omitempty"`
+	// customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a
+	// ConfigMap in the openshift-config namespace. This can be created with a command like
+	// 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'.
+	// Only one of customLogoFile or logos can be set at a time.
+	// Image size must be less than 1 MB due to constraints on the ConfigMap size.
+	// The ConfigMap key should include a file extension so that the console serves the file
+	// with the correct MIME type.
+	// The recommended file format for the logo is SVG, but other file formats are allowed if supported by the browser.
+	// Deprecated: Use logos instead.
+	// +optional
+	CustomLogoFile configv1.ConfigMapFileReference `json:"customLogoFile,omitempty"`
+	// developerCatalog allows configuring the shown developer catalog categories (filters) and types (sub-catalogs).
+	// +optional
+	DeveloperCatalog DeveloperConsoleCatalogCustomization `json:"developerCatalog,omitempty"`
+	// projectAccess allows customizing the available list of ClusterRoles in the Developer perspective
+	// Project access page, which can be used by a project admin to specify roles to other users and
+	// restrict access within the project. If set, the list will replace the default ClusterRole options.
+	// +optional
+	ProjectAccess ProjectAccess `json:"projectAccess,omitempty"`
+	// quickStarts allows customization of available ConsoleQuickStart resources in console.
+	// +optional
+	QuickStarts QuickStarts `json:"quickStarts,omitempty"`
+	// addPage allows customizing actions on the Add page in developer perspective.
+	// +optional
+	AddPage AddPage `json:"addPage,omitempty"`
+	// perspectives allows enabling/disabling of perspective(s) that the user can see in the Perspective switcher dropdown.
+	// +listType=map
+	// +listMapKey=id
+	// +optional
+	Perspectives []Perspective `json:"perspectives"`
+}
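A sketch tying Logo, Theme, and FileReferenceSource together; the ConfigMap name and keys are assumptions, mirroring the 'oc create configmap custom-logos-config' command mentioned above.

// Illustrative only: dark and light masthead logos served from an assumed
// ConfigMap "custom-logos-config" in openshift-config; keys keep their
// file extensions so the console can pick the MIME type.
var _ = ConsoleCustomization{
	Logos: []Logo{{
		Type: LogoTypeMasthead,
		Themes: []Theme{
			{Mode: ThemeModeDark, Source: FileReferenceSource{
				From:      SourceTypeConfigMap,
				ConfigMap: &ConfigMapFileReference{Name: "custom-logos-config", Key: "logo-dark.svg"},
			}},
			{Mode: ThemeModeLight, Source: FileReferenceSource{
				From:      SourceTypeConfigMap,
				ConfigMap: &ConfigMapFileReference{Name: "custom-logos-config", Key: "logo-light.svg"},
			}},
		},
	}},
}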
+
+// ProjectAccess contains options for project access roles
+type ProjectAccess struct {
+	// availableClusterRoles is the list of ClusterRole names that are assignable to users
+	// through the project access tab.
+	// +optional
+	AvailableClusterRoles []string `json:"availableClusterRoles,omitempty"`
+}
+
+// CatalogTypesState defines the state of the catalog types based on which the types will be enabled or disabled.
+type CatalogTypesState string
+
+const (
+	CatalogTypeEnabled  CatalogTypesState = "Enabled"
+	CatalogTypeDisabled CatalogTypesState = "Disabled"
+)
+
+// DeveloperConsoleCatalogTypes defines the state of the sub-catalog types.
+// +kubebuilder:validation:XValidation:rule="self.state == 'Enabled' ? true : !has(self.enabled)",message="enabled is forbidden when state is not Enabled"
+// +kubebuilder:validation:XValidation:rule="self.state == 'Disabled' ? true : !has(self.disabled)",message="disabled is forbidden when state is not Disabled"
+// +union
+type DeveloperConsoleCatalogTypes struct {
+	// state defines if a list of catalog types should be enabled or disabled.
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="Enabled";"Disabled";
+	// +kubebuilder:default:="Enabled"
+	// +default="Enabled"
+	// +required
+	State CatalogTypesState `json:"state,omitempty"`
+	// enabled is a list of developer catalog types (sub-catalog IDs) that will be shown to users.
+	// Types (sub-catalogs) are added via console plugins; the available types (sub-catalog IDs) are available
+	// in the console on the cluster configuration page, or when editing the YAML in the console.
+	// Example: "Devfile", "HelmChart", "BuilderImage"
+	// If the list is non-empty, a new type will not be shown to the user until it is added to the list.
+	// If the list is empty the complete developer catalog will be shown.
+	// +listType=set
+	// +unionMember,optional
+	Enabled *[]string `json:"enabled,omitempty"`
+	// disabled is a list of developer catalog types (sub-catalog IDs) that are not shown to users.
+	// Types (sub-catalogs) are added via console plugins; the available types (sub-catalog IDs) are available
+	// in the console on the cluster configuration page, or when editing the YAML in the console.
+	// Example: "Devfile", "HelmChart", "BuilderImage"
+	// If the list is empty or all the available sub-catalog types are added, then the complete developer catalog should be hidden.
+	// +listType=set
+	// +unionMember,optional
+	Disabled *[]string `json:"disabled,omitempty"`
+}
+
+// DeveloperConsoleCatalogCustomization allows cluster admins to configure the developer catalog.
+type DeveloperConsoleCatalogCustomization struct {
+	// categories which are shown in the developer catalog.
+	// +optional
+	Categories []DeveloperConsoleCatalogCategory `json:"categories,omitempty"`
+	// types allows enabling or disabling of sub-catalog types that the user can see in the Developer catalog.
+	// When omitted, all the sub-catalog types will be shown.
+	// +optional
+	Types DeveloperConsoleCatalogTypes `json:"types,omitempty"`
+}
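A sketch of the sub-catalog union; per the CEL rules above, the disabled list may only be set while state is Disabled (the type IDs are examples taken from the comments).

// Illustrative only: hide two sub-catalog types; switching state back to
// "Enabled" would require clearing this list first.
var _ = DeveloperConsoleCatalogTypes{
	State:    CatalogTypeDisabled,
	Disabled: &[]string{"Devfile", "HelmChart"},
}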
+
+// DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category.
+type DeveloperConsoleCatalogCategoryMeta struct {
+	// id is an identifier used in the URL to enable deep linking in console.
+	// ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=32
+	// +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$`
+	// +required
+	ID string `json:"id"`
+	// label defines a category display label. It is required and must have 1-64 characters.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=64
+	// +required
+	Label string `json:"label"`
+	// tags is a list of strings that will match the category. A selected category
+	// shows all items which have at least one overlapping tag between the category and the item.
+	// +optional
+	Tags []string `json:"tags,omitempty"`
+}
+
+// DeveloperConsoleCatalogCategory for the developer console catalog.
+type DeveloperConsoleCatalogCategory struct {
+	// defines the top level category ID, label and filter tags.
+	DeveloperConsoleCatalogCategoryMeta `json:",inline"`
+	// subcategories defines a list of child categories.
+	// +optional
+	Subcategories []DeveloperConsoleCatalogCategoryMeta `json:"subcategories,omitempty"`
+}
+
+// QuickStarts allow cluster admins to customize available ConsoleQuickStart resources.
+type QuickStarts struct {
+	// disabled is a list of ConsoleQuickStart resource names that are not shown to users.
+	// +optional
+	Disabled []string `json:"disabled,omitempty"`
+}
+
+// AddPage allows customizing actions on the Add page in developer perspective.
+type AddPage struct {
+	// disabledActions is a list of actions that are not shown to users.
+	// Each action in the list is represented by its ID.
+	// +kubebuilder:validation:MinItems=1
+	// +optional
+	DisabledActions []string `json:"disabledActions,omitempty"`
+}
+
+// PerspectiveState defines the visibility state of the perspective. "Enabled" means the perspective is shown.
+// "Disabled" means the perspective is hidden.
+// "AccessReview" means an access review check is required to show or hide the perspective.
+type PerspectiveState string
+
+const (
+	PerspectiveEnabled      PerspectiveState = "Enabled"
+	PerspectiveDisabled     PerspectiveState = "Disabled"
+	PerspectiveAccessReview PerspectiveState = "AccessReview"
+)
+
+// ResourceAttributesAccessReview defines the visibility of the perspective depending on the access review checks.
+// `required` and `missing` can work together, especially in the case where the cluster admin
+// wants to show another perspective to users without specific permissions. Of `required` and `missing`, at least one property should be non-empty.
+// +kubebuilder:validation:MinProperties:=1
+type ResourceAttributesAccessReview struct {
+	// required defines a list of permission checks. The perspective will only be shown when all checks are successful. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the missing access review list.
+	// +optional
+	Required []authorizationv1.ResourceAttributes `json:"required"`
+	// missing defines a list of permission checks. The perspective will only be shown when at least one check fails. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the required access review list.
+	// +optional
+	Missing []authorizationv1.ResourceAttributes `json:"missing"`
+}
+
+// PerspectiveVisibility defines the criteria to show/hide a perspective
+// +kubebuilder:validation:XValidation:rule="self.state == 'AccessReview' ? has(self.accessReview) : !has(self.accessReview)",message="accessReview configuration is required when state is AccessReview, and forbidden otherwise"
+// +union
+type PerspectiveVisibility struct {
+	// state defines whether the perspective is enabled or disabled, or whether an access review check is required.
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="Enabled";"Disabled";"AccessReview"
+	// +required
+	State PerspectiveState `json:"state"`
+	// accessReview defines required and missing access review checks.
+	// +optional
+	AccessReview *ResourceAttributesAccessReview `json:"accessReview,omitempty"`
+}
+
+// Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown
+// +kubebuilder:validation:XValidation:rule="has(self.id) && self.id != 'dev'? !has(self.pinnedResources) : true",message="pinnedResources is allowed only for dev and forbidden for other perspectives"
+type Perspective struct {
+	// id defines the id of the perspective.
+	// Example: "dev", "admin".
+	// The available perspective ids can be found in the code snippet section next to the yaml editor.
+	// Incorrect or unknown ids will be ignored.
+	// +required
+	ID string `json:"id"`
+	// visibility defines the state of the perspective along with access review checks if needed for that perspective.
+	// +required
+	Visibility PerspectiveVisibility `json:"visibility"`
+	// pinnedResources defines the list of default pinned resources that users will see on the perspective navigation if they have not customized these pinned resources themselves.
+	// The list of available Kubernetes resources could be read via `kubectl api-resources`.
+	// The console will also provide a configuration UI and a YAML snippet that will list the available resources that can be pinned to the navigation.
+	// Incorrect or unknown resources will be ignored.
+	// +kubebuilder:validation:MaxItems=100
+	// +optional
+	PinnedResources *[]PinnedResourceReference `json:"pinnedResources,omitempty"`
+}
+
+// PinnedResourceReference includes the group, version and type of resource
+type PinnedResourceReference struct {
+	// group is the API Group of the Resource.
+	// Enter empty string for the core group.
+	// This value should consist of only lowercase alphanumeric characters, hyphens and periods.
+	// Example: "", "apps", "build.openshift.io", etc.
+	// +kubebuilder:validation:Pattern:="^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
+	// +required
+	Group string `json:"group"`
+	// version is the API Version of the Resource.
+	// This value should consist of only lowercase alphanumeric characters.
+	// Example: "v1", "v1beta1", etc.
+	// +kubebuilder:validation:Pattern:="^[a-z0-9]+$"
+	// +required
+	Version string `json:"version"`
+	// resource is the type that is being referenced.
+	// It is normally the plural form of the resource kind in lowercase.
+	// This value should consist of only lowercase alphanumeric characters and hyphens.
+	// Example: "deployments", "deploymentconfigs", "pods", etc.
+	// +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
+	// +required
+	Resource string `json:"resource"`
+}
+
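A sketch of a perspective gated by an access review; because state is AccessReview, the accessReview block is mandatory per the CEL rule above (the permission chosen is an assumption).

// Illustrative only: show the "dev" perspective just to users who can
// list namespaces; with any other state, accessReview must stay unset.
var _ = Perspective{
	ID: "dev",
	Visibility: PerspectiveVisibility{
		State: PerspectiveAccessReview,
		AccessReview: &ResourceAttributesAccessReview{
			Required: []authorizationv1.ResourceAttributes{{Verb: "list", Resource: "namespaces"}},
		},
	},
}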
+// Brand is a specific supported brand within the console.
+type Brand string
+
+const (
+	// Legacy branding for OpenShift
+	BrandOpenShiftLegacy Brand = "openshift"
+	// Legacy branding for The Origin Community Distribution of Kubernetes
+	BrandOKDLegacy Brand = "okd"
+	// Legacy branding for OpenShift Online
+	BrandOnlineLegacy Brand = "online"
+	// Legacy branding for OpenShift Container Platform
+	BrandOCPLegacy Brand = "ocp"
+	// Legacy branding for OpenShift Dedicated
+	BrandDedicatedLegacy Brand = "dedicated"
+	// Legacy branding for Azure Red Hat OpenShift
+	BrandAzureLegacy Brand = "azure"
+	// Branding for OpenShift
+	BrandOpenShift Brand = "OpenShift"
+	// Branding for The Origin Community Distribution of Kubernetes
+	BrandOKD Brand = "OKD"
+	// Branding for OpenShift Online
+	BrandOnline Brand = "Online"
+	// Branding for OpenShift Container Platform
+	BrandOCP Brand = "OCP"
+	// Branding for OpenShift Dedicated
+	BrandDedicated Brand = "Dedicated"
+	// Branding for Azure Red Hat OpenShift
+	BrandAzure Brand = "Azure"
+	// Branding for Red Hat OpenShift Service on AWS
+	BrandROSA Brand = "ROSA"
+)
+
+// Ingress allows cluster admins to configure an alternative ingress for the console.
+type Ingress struct {
+	// consoleURL is a URL to be used as the base console address.
+	// If not specified, the console route hostname will be used.
+	// This field is required for clusters without ingress capability,
+	// where access to routes is not possible.
+	// Make sure that appropriate ingress is set up at this URL.
+	// The console operator will monitor the URL and may go degraded
+	// if it's unreachable for an extended period.
+	// Must use the HTTPS scheme.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="size(self) == 0 || isURL(self)",message="console url must be a valid absolute URL"
+	// +kubebuilder:validation:XValidation:rule="size(self) == 0 || url(self).getScheme() == 'https'",message="console url scheme must be https"
+	// +kubebuilder:validation:MaxLength=1024
+	ConsoleURL string `json:"consoleURL"`
+	// clientDownloadsURL is a URL to be used as the address to download client binaries.
+	// If not specified, the downloads route hostname will be used.
+	// This field is required for clusters without ingress capability,
+	// where access to routes is not possible.
+	// The console operator will monitor the URL and may go degraded
+	// if it's unreachable for an extended period.
+	// Must use the HTTPS scheme.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="size(self) == 0 || isURL(self)",message="client downloads url must be a valid absolute URL"
+	// +kubebuilder:validation:XValidation:rule="size(self) == 0 || url(self).getScheme() == 'https'",message="client downloads url scheme must be https"
+	// +kubebuilder:validation:MaxLength=1024
+	ClientDownloadsURL string `json:"clientDownloadsURL"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ConsoleList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Console `json:"items"`
+}
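A sketch of the ingress override; both values must be absolute HTTPS URLs per the CEL rules above (the hostnames are assumptions).

// Illustrative only: alternative ingress for a cluster without the
// ingress capability; plain http:// values would fail validation.
var _ = Ingress{
	ConsoleURL:         "https://console.apps.example.com",
	ClientDownloadsURL: "https://downloads.apps.example.com",
}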
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go
new file mode 100644
index 000000000..279990448
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go
@@ -0,0 +1,404 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ClusterCSIDriver is used to manage and configure a CSI driver installed by default
+// in OpenShift. An example configuration may look like:
+// apiVersion: operator.openshift.io/v1
+// kind: "ClusterCSIDriver"
+// metadata:
+//   name: "ebs.csi.aws.com"
+// spec:
+//   logLevel: Debug
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clustercsidrivers,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/701
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=csi-driver,operatorOrdering=01
+
+// ClusterCSIDriver object allows management and configuration of a CSI driver operator
+// installed by default in OpenShift. The name of the object must be the name of the CSI driver
+// it operates. See the CSIDriverName type for the list of allowed values.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterCSIDriver struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec holds user settable values for configuration
+	// +required
+	Spec ClusterCSIDriverSpec `json:"spec"`
+
+	// status holds observed values from the cluster. They may not be overridden.
+	// +optional
+	Status ClusterCSIDriverStatus `json:"status"`
+}
+
+// CSIDriverName is the name of the CSI driver
+type CSIDriverName string
+
+// +kubebuilder:validation:Enum="";Managed;Unmanaged;Removed
+// StorageClassStateName defines various configuration states for storageclass management
+// and reconciliation by the CSI operator.
+type StorageClassStateName string
+
+const (
+	// ManagedStorageClass means that the operator is actively managing its storage classes.
+	// Most manual changes made by the cluster admin to a storageclass will be wiped away by the CSI
+	// operator if StorageClassState is set to Managed.
+	ManagedStorageClass StorageClassStateName = "Managed"
+	// UnmanagedStorageClass means that the operator is not actively managing storage classes.
+	// If StorageClassState is Unmanaged then the CSI operator will not be actively reconciling storage classes
+	// it previously created. This can be useful if a cluster admin wants to modify a storage class installed
+	// by the CSI operator.
+	UnmanagedStorageClass StorageClassStateName = "Unmanaged"
+	// RemovedStorageClass instructs the operator to remove the storage class.
+	// If StorageClassState is Removed - the CSI operator will delete storage classes it created
+	// previously. This can be useful in clusters where cluster admins want to prevent
+	// creation of dynamically provisioned volumes but still need the rest of the features
+	// provided by the CSI operator and driver.
+	RemovedStorageClass StorageClassStateName = "Removed"
+)
+
+// If you are adding a new driver name here, ensure that the 0000_50_cluster_csi_driver_01_config.crd.yaml-merge-patch file is also updated with the new driver name.
+const (
+	AWSEBSCSIDriver          CSIDriverName = "ebs.csi.aws.com"
+	AWSEFSCSIDriver          CSIDriverName = "efs.csi.aws.com"
+	AzureDiskCSIDriver       CSIDriverName = "disk.csi.azure.com"
+	AzureFileCSIDriver       CSIDriverName = "file.csi.azure.com"
+	GCPFilestoreCSIDriver    CSIDriverName = "filestore.csi.storage.gke.io"
+	GCPPDCSIDriver           CSIDriverName = "pd.csi.storage.gke.io"
+	CinderCSIDriver          CSIDriverName = "cinder.csi.openstack.org"
+	VSphereCSIDriver         CSIDriverName = "csi.vsphere.vmware.com"
+	ManilaCSIDriver          CSIDriverName = "manila.csi.openstack.org"
+	OvirtCSIDriver           CSIDriverName = "csi.ovirt.org"
+	KubevirtCSIDriver        CSIDriverName = "csi.kubevirt.io"
+	SharedResourcesCSIDriver CSIDriverName = "csi.sharedresource.openshift.io"
+	AlibabaDiskCSIDriver     CSIDriverName = "diskplugin.csi.alibabacloud.com"
+	IBMVPCBlockCSIDriver     CSIDriverName = "vpc.block.csi.ibm.io"
+	IBMPowerVSBlockCSIDriver CSIDriverName = "powervs.csi.ibm.com"
+	SecretsStoreCSIDriver    CSIDriverName = "secrets-store.csi.k8s.io"
+	SambaCSIDriver           CSIDriverName = "smb.csi.k8s.io"
+)
+
+// ClusterCSIDriverSpec is the desired behavior of the CSI driver operator
+type ClusterCSIDriverSpec struct {
+	OperatorSpec `json:",inline"`
+	// storageClassState determines if the CSI operator should create and manage storage classes.
+	// If this field value is empty or Managed - the CSI operator will continuously reconcile
+	// the storage class and create it if necessary.
+	// If this field value is Unmanaged - the CSI operator will not reconcile any previously created
+	// storage class.
+	// If this field value is Removed - the CSI operator will delete the storage class it created previously.
+	// When omitted, this means the user has no opinion and the platform chooses a reasonable default,
+	// which is subject to change over time.
+	// The current default behaviour is Managed.
+	// +optional
+	StorageClassState StorageClassStateName `json:"storageClassState,omitempty"`
+
+	// driverConfig can be used to specify platform specific driver configuration.
+	// When omitted, this means no opinion and the platform is left to choose reasonable
+	// defaults. These defaults are subject to change over time.
+	// +optional
+	DriverConfig CSIDriverConfigSpec `json:"driverConfig"`
+}
+
+// CSIDriverType indicates the type of CSI driver being configured.
+// +kubebuilder:validation:Enum="";AWS;Azure;GCP;IBMCloud;vSphere
+type CSIDriverType string
+
+const (
+	AWSDriverType      CSIDriverType = "AWS"
+	AzureDriverType    CSIDriverType = "Azure"
+	GCPDriverType      CSIDriverType = "GCP"
+	IBMCloudDriverType CSIDriverType = "IBMCloud"
+	VSphereDriverType  CSIDriverType = "vSphere"
+)
+
+// CSIDriverConfigSpec defines configuration spec that can be
+// used to optionally configure a specific CSI Driver.
+// +kubebuilder:validation:XValidation:rule="has(self.driverType) && self.driverType == 'IBMCloud' ? has(self.ibmcloud) : !has(self.ibmcloud)",message="ibmcloud must be set if driverType is 'IBMCloud', but remain unset otherwise"
+// +union
+type CSIDriverConfigSpec struct {
+	// driverType indicates the type of CSI driver to which the
+	// driverConfig is being applied.
+	// Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted.
+	// Consumers should treat unknown values as a NO-OP.
+	// +required
+	// +unionDiscriminator
+	DriverType CSIDriverType `json:"driverType"`
+
+	// aws is used to configure the AWS CSI driver.
+	// +optional
+	AWS *AWSCSIDriverConfigSpec `json:"aws,omitempty"`
+
+	// azure is used to configure the Azure CSI driver.
+	// +optional
+	Azure *AzureCSIDriverConfigSpec `json:"azure,omitempty"`
+
+	// gcp is used to configure the GCP CSI driver.
+	// +optional
+	GCP *GCPCSIDriverConfigSpec `json:"gcp,omitempty"`
+
+	// ibmcloud is used to configure the IBM Cloud CSI driver.
+	// +optional
+	IBMCloud *IBMCloudCSIDriverConfigSpec `json:"ibmcloud,omitempty"`
+
+	// vSphere is used to configure the vSphere CSI driver.
+	// +optional
+	VSphere *VSphereCSIDriverConfigSpec `json:"vSphere,omitempty"`
+}
+
+// AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver.
+type AWSCSIDriverConfigSpec struct {
+	// kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key,
+	// rather than the default KMS key used by AWS.
+	// The value may be either the ARN or Alias ARN of a KMS key.
+	// +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b|aws-iso-e|aws-iso-f):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$`
+	// +optional
+	KMSKeyARN string `json:"kmsKeyARN,omitempty"`
+
+	// efsVolumeMetrics sets the configuration for collecting metrics from EFS volumes used by the EFS CSI Driver.
+	// +optional
+	EFSVolumeMetrics *AWSEFSVolumeMetrics `json:"efsVolumeMetrics,omitempty"`
+}
+
+// AWSEFSVolumeMetricsState defines the modes for collecting volume metrics in the AWS EFS CSI Driver.
+// This can either enable recursive collection of volume metrics or disable metric collection entirely.
+// +kubebuilder:validation:Enum:="RecursiveWalk";"Disabled"
+type AWSEFSVolumeMetricsState string
+
+const (
+	// AWSEFSVolumeMetricsRecursiveWalk indicates that volume metrics collection in the AWS EFS CSI Driver
+	// is performed by recursively walking through the files in the volume.
+	AWSEFSVolumeMetricsRecursiveWalk AWSEFSVolumeMetricsState = "RecursiveWalk"
+
+	// AWSEFSVolumeMetricsDisabled indicates that volume metrics collection in the AWS EFS CSI Driver is disabled.
+	AWSEFSVolumeMetricsDisabled AWSEFSVolumeMetricsState = "Disabled"
+)
+
+// AWSEFSVolumeMetrics defines the configuration for volume metrics in the EFS CSI Driver.
+// +union
+type AWSEFSVolumeMetrics struct {
+	// state defines the state of metric collection in the AWS EFS CSI Driver.
+	// This field is required and must be set to one of the following values: Disabled or RecursiveWalk.
+	// Disabled means no metrics collection will be performed. This is the default value.
+	// RecursiveWalk means the AWS EFS CSI Driver will recursively scan volumes to collect metrics.
+	// This process may result in high CPU and memory usage, depending on the volume size.
+	// +unionDiscriminator
+	// +required
+	State AWSEFSVolumeMetricsState `json:"state"`
+
+	// recursiveWalk provides additional configuration for collecting volume metrics in the AWS EFS CSI Driver
+	// when the state is set to RecursiveWalk.
+	// +unionMember
+	// +optional
+	RecursiveWalk *AWSEFSVolumeMetricsRecursiveWalkConfig `json:"recursiveWalk,omitempty"`
+}
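A sketch of the driver-config union: driverType discriminates, and the optional recursiveWalk knobs below spell out the documented defaults explicitly.

// Illustrative only: opt in to recursive EFS volume metrics with the
// documented defaults made explicit (240-minute refresh, 5 goroutines).
var _ = CSIDriverConfigSpec{
	DriverType: AWSDriverType,
	AWS: &AWSCSIDriverConfigSpec{
		EFSVolumeMetrics: &AWSEFSVolumeMetrics{
			State: AWSEFSVolumeMetricsRecursiveWalk,
			RecursiveWalk: &AWSEFSVolumeMetricsRecursiveWalkConfig{
				RefreshPeriodMinutes: 240,
				FSRateLimit:          5,
			},
		},
	},
}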
+
+// AWSEFSVolumeMetricsRecursiveWalkConfig defines options for volume metrics in the EFS CSI Driver.
+type AWSEFSVolumeMetricsRecursiveWalkConfig struct {
+	// refreshPeriodMinutes specifies the frequency, in minutes, at which volume metrics are refreshed.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable
+	// default, which is subject to change over time. The current default is 240.
+	// The valid range is from 1 to 43200 minutes (30 days).
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=43200
+	// +optional
+	RefreshPeriodMinutes int32 `json:"refreshPeriodMinutes,omitempty"`
+
+	// fsRateLimit defines the rate limit, in goroutines per file system, for processing volume metrics.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable
+	// default, which is subject to change over time. The current default is 5.
+	// The valid range is from 1 to 100 goroutines.
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=100
+	// +optional
+	FSRateLimit int32 `json:"fsRateLimit,omitempty"`
+}
+
+// AzureDiskEncryptionSet defines the configuration for a disk encryption set.
+type AzureDiskEncryptionSet struct {
+	// subscriptionID defines the Azure subscription that contains the disk encryption set.
+	// The value should meet the following conditions:
+	// 1. It should be a 128-bit number.
+	// 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long.
+	// 3. It should be displayed in five groups separated by hyphens (-).
+	// 4. The first group should be 8 characters long.
+	// 5. The second, third, and fourth groups should be 4 characters long.
+	// 6. The fifth group should be 12 characters long.
+	// An example subscriptionID: f2007bbf-f802-4a47-9336-cf7c6b89b378
+	// +required
+	// +kubebuilder:validation:MaxLength:=36
+	// +kubebuilder:validation:Pattern:=`^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$`
+	SubscriptionID string `json:"subscriptionID"`
+
+	// resourceGroup defines the Azure resource group that contains the disk encryption set.
+	// The value should consist of only alphanumeric characters,
+	// underscores (_), parentheses, hyphens and periods.
+	// The value should not end in a period and be at most 90 characters in
+	// length.
+	// +required
+	// +kubebuilder:validation:MaxLength:=90
+	// +kubebuilder:validation:Pattern:=`^[\w\.\-\(\)]*[\w\-\(\)]$`
+	ResourceGroup string `json:"resourceGroup"`
+
+	// name is the name of the disk encryption set that will be set on the default storage class.
+	// The value should consist of only alphanumeric characters,
+	// underscores (_), hyphens, and be at most 80 characters in length.
+	// +required
+	// +kubebuilder:validation:MaxLength:=80
+	// +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+	Name string `json:"name"`
+}
+
+// AzureCSIDriverConfigSpec defines properties that can be configured for the Azure CSI driver.
+type AzureCSIDriverConfigSpec struct {
+	// diskEncryptionSet sets the cluster default storage class to encrypt volumes with a
+	// customer-managed encryption set, rather than the default platform-managed keys.
+	// +optional
+	DiskEncryptionSet *AzureDiskEncryptionSet `json:"diskEncryptionSet,omitempty"`
+}
+
+// GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key
+type GCPKMSKeyReference struct {
+	// name is the name of the customer-managed encryption key to be used for disk encryption.
+	// The value should correspond to an existing KMS key and should
+	// consist of only alphanumeric characters, hyphens (-) and underscores (_),
+	// and be at most 63 characters in length.
+	// +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+	// +kubebuilder:validation:MinLength:=1
+	// +kubebuilder:validation:MaxLength:=63
+	// +required
+	Name string `json:"name"`
+
+	// keyRing is the name of the KMS Key Ring which the KMS Key belongs to.
+	// The value should correspond to an existing KMS key ring and should
+	// consist of only alphanumeric characters, hyphens (-) and underscores (_),
+	// and be at most 63 characters in length.
+	// +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+	// +kubebuilder:validation:MinLength:=1
+	// +kubebuilder:validation:MaxLength:=63
+	// +required
+	KeyRing string `json:"keyRing"`
+
+	// projectID is the ID of the Project in which the KMS Key Ring exists.
+	// It must be 6 to 30 lowercase letters, digits, or hyphens.
+	// It must start with a letter. Trailing hyphens are prohibited.
+	// +kubebuilder:validation:Pattern:=`^[a-z][a-z0-9-]+[a-z0-9]$`
+	// +kubebuilder:validation:MinLength:=6
+	// +kubebuilder:validation:MaxLength:=30
+	// +required
+	ProjectID string `json:"projectID"`
+
+	// location is the GCP location in which the Key Ring exists.
+	// The value must match an existing GCP location, or "global".
+	// Defaults to global, if not set.
+	// +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+	// +optional
+	Location string `json:"location,omitempty"`
+}
+
+// GCPCSIDriverConfigSpec defines properties that can be configured for the GCP CSI driver.
+type GCPCSIDriverConfigSpec struct {
+	// kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied
+	// encryption keys, rather than the default keys managed by GCP.
+	// +optional
+	KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"`
+}
+
+// IBMCloudCSIDriverConfigSpec defines the properties that can be configured for the IBM Cloud CSI driver.
+type IBMCloudCSIDriverConfigSpec struct {
+	// encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use
+	// for disk encryption of volumes for the default storage classes.
+	// +required
+	// +kubebuilder:validation:MaxLength:=154
+	// +kubebuilder:validation:MinLength:=144
+	// +kubebuilder:validation:Pattern:=`^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$`
+	EncryptionKeyCRN string `json:"encryptionKeyCRN"`
+}
+
+// VSphereCSIDriverConfigSpec defines properties that
+// can be configured for the vSphere CSI driver.
+type VSphereCSIDriverConfigSpec struct {
+	// topologyCategories indicates the tag categories with which
+	// vCenter resources such as the host cluster or datacenter were tagged.
+	// If the cluster Infrastructure object has a topology, the values specified in
+	// the Infrastructure object will be used and modifications to topologyCategories
+	// will be rejected.
+	// +listType=atomic
+	// +optional
+	TopologyCategories []string `json:"topologyCategories,omitempty"`
+
+	// globalMaxSnapshotsPerBlockVolume is a global configuration parameter that applies to volumes on all kinds of
+	// datastores. If omitted, the platform chooses a default, which is subject to change over time; currently that default is 3.
+	// Snapshots cannot be disabled using this parameter.
+	// Increasing the number of snapshots above 3 can have a negative impact on performance; for more details see: https://kb.vmware.com/s/article/1025279
+	// Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=32
+	// +optional
+	GlobalMaxSnapshotsPerBlockVolume *uint32 `json:"globalMaxSnapshotsPerBlockVolume,omitempty"`
+
+	// granularMaxSnapshotsPerBlockVolumeInVSAN is a granular configuration parameter on vSAN datastores only. It
+	// overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset.
+	// Snapshots for vSAN cannot be disabled using this parameter.
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=32
+	// +optional
+	GranularMaxSnapshotsPerBlockVolumeInVSAN *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVSAN,omitempty"`
+
+	// granularMaxSnapshotsPerBlockVolumeInVVOL is a granular configuration parameter on Virtual Volumes datastores only.
+	// It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset.
+	// Snapshots for VVOL cannot be disabled using this parameter.
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=32
+	// +optional
+	GranularMaxSnapshotsPerBlockVolumeInVVOL *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVVOL,omitempty"`
+
+	// maxAllowedBlockVolumesPerNode is an optional configuration parameter that allows setting a custom value for the
+	// limit of the number of PersistentVolumes attached to a node. In vSphere version 7 this limit was set to 59 by
+	// default, however in vSphere version 8 this limit was increased to 255.
+	// Before increasing this value above 59, the cluster administrator needs to ensure that every node forming the
+	// cluster is updated to ESXi version 8 or higher and that all nodes are running the same version.
+	// The limit must be between 1 and 255, which matches the vSphere version 8 maximum.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to
+	// change over time.
+	// The current default is 59, which matches the limit for vSphere version 7.
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=255
+	// +openshift:enable:FeatureGate=VSphereConfigurableMaxAllowedBlockVolumesPerNode
+	// +optional
+	MaxAllowedBlockVolumesPerNode int32 `json:"maxAllowedBlockVolumesPerNode,omitempty"`
+}
+
+// ClusterCSIDriverStatus is the observed status of the CSI driver operator
+type ClusterCSIDriverStatus struct {
+	OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterCSIDriverList contains a list of ClusterCSIDriver
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterCSIDriverList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []ClusterCSIDriver `json:"items"`
+}
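A sketch of the vSphere knob above; raising the attach limit to the vSphere 8 maximum presumes every node already runs ESXi 8, as the comment warns.

// Illustrative only: raise the per-node volume attachment limit from the
// vSphere 7 default (59) to the vSphere 8 maximum (255).
var _ = VSphereCSIDriverConfigSpec{
	MaxAllowedBlockVolumesPerNode: 255,
}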
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + Items []ClusterCSIDriver `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go new file mode 100644 index 000000000..d6d283d36 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go @@ -0,0 +1,60 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=csisnapshotcontrollers,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/562 +// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=csi-snapshot-controller,operatorOrdering=01 + +// CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type CSISnapshotController struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec CSISnapshotControllerSpec `json:"spec"` + + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status CSISnapshotControllerStatus `json:"status"` +} + +// CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator. +type CSISnapshotControllerSpec struct { + OperatorSpec `json:",inline"` +} + +// CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator. +type CSISnapshotControllerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSISnapshotControllerList contains a list of CSISnapshotControllers. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type CSISnapshotControllerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []CSISnapshotController `json:"items"`
+}
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_dns.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_dns.go
new file mode 100644
index 000000000..258804786
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_dns.go
@@ -0,0 +1,525 @@
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=dnses,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_70,operatorName=dns,operatorOrdering=00
+
+// DNS manages the CoreDNS component to provide a name resolution service
+// for pods and services in the cluster.
+//
+// This supports the DNS-based service discovery specification:
+// https://github.com/kubernetes/dns/blob/master/docs/specification.md
+//
+// More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DNS struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec is the specification of the desired behavior of the DNS.
+ Spec DNSSpec `json:"spec,omitempty"`
+ // status is the most recently observed status of the DNS.
+ Status DNSStatus `json:"status,omitempty"`
+}
+
+// DNSSpec is the specification of the desired behavior of the DNS.
+type DNSSpec struct {
+ // servers is a list of DNS resolvers that provide name query delegation for one or
+ // more subdomains outside the scope of the cluster domain. If servers consists of
+ // more than one Server, longest suffix match will be used to determine the Server.
+ //
+ // For example, if there are two Servers, one for "foo.com" and another for "a.foo.com",
+ // and the name query is for "www.a.foo.com", it will be routed to the Server with Zone
+ // "a.foo.com".
+ //
+ // If this field is nil, no servers are created.
+ //
+ // +optional
+ Servers []Server `json:"servers,omitempty"`
+
+ // upstreamResolvers defines a schema for configuring CoreDNS
+ // to proxy DNS messages to upstream resolvers for the case of the
+ // default (".") server
+ //
+ // If this field is not specified, the upstream used will default to
+ // /etc/resolv.conf, with policy "sequential"
+ //
+ // +optional
+ UpstreamResolvers UpstreamResolvers `json:"upstreamResolvers"`
+
+ // nodePlacement provides explicit control over the scheduling of DNS
+ // pods.
+ //
+ // Generally, it is useful to run a DNS pod on every node so that DNS
+ // queries are always handled by a local DNS pod instead of going over
+ // the network to a DNS pod on another node.
However, security policies + // may require restricting the placement of DNS pods to specific nodes. + // For example, if a security policy prohibits pods on arbitrary nodes + // from communicating with the API, a node selector can be specified to + // restrict DNS pods to nodes that are permitted to communicate with the + // API. Conversely, if running DNS pods on nodes with a particular + // taint is desired, a toleration can be specified for that taint. + // + // If unset, defaults are used. See nodePlacement for more details. + // + // +optional + NodePlacement DNSNodePlacement `json:"nodePlacement,omitempty"` + + // managementState indicates whether the DNS operator should manage cluster + // DNS + // +optional + ManagementState ManagementState `json:"managementState,omitempty"` + + // operatorLogLevel controls the logging level of the DNS Operator. + // Valid values are: "Normal", "Debug", "Trace". + // Defaults to "Normal". + // setting operatorLogLevel: Trace will produce extremely verbose logs. + // +optional + // +kubebuilder:default=Normal + OperatorLogLevel DNSLogLevel `json:"operatorLogLevel,omitempty"` + + // logLevel describes the desired logging verbosity for CoreDNS. + // Any one of the following values may be specified: + // * Normal logs errors from upstream resolvers. + // * Debug logs errors, NXDOMAIN responses, and NODATA responses. + // * Trace logs errors and all responses. + // Setting logLevel: Trace will produce extremely verbose logs. + // Valid values are: "Normal", "Debug", "Trace". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + LogLevel DNSLogLevel `json:"logLevel,omitempty"` + + // cache describes the caching configuration that applies to all server blocks listed in the Corefile. + // This field allows a cluster admin to optionally configure: + // * positiveTTL which is a duration for which positive responses should be cached. + // * negativeTTL which is a duration for which negative responses should be cached. + // If this is not configured, OpenShift will configure positive and negative caching with a default value that is + // subject to change. At the time of writing, the default positiveTTL is 900 seconds and the default negativeTTL is + // 30 seconds or as noted in the respective Corefile for your version of OpenShift. + // +optional + Cache DNSCache `json:"cache,omitempty"` +} + +// DNSCache defines the fields for configuring DNS caching. +type DNSCache struct { + // positiveTTL is optional and specifies the amount of time that a positive response should be cached. + // + // If configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This + // field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, + // e.g. "100s", "1m30s", "12h30m10s". Values that are fractions of a second are rounded down to the nearest second. + // If the configured value is less than 1s, the default value will be used. + // If not configured, the value will be 0s and OpenShift will use a default value of 900 seconds unless noted + // otherwise in the respective Corefile for your version of OpenShift. The default value of 900 seconds is subject + // to change. 
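+ // For example (illustrative values): positiveTTL: "1h30m" caches positive
+ // responses for 90 minutes, while a fractional value such as "1.5s" is
+ // rounded down to "1s".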
+ // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ + // +kubebuilder:validation:Type:=string + // +optional + PositiveTTL metav1.Duration `json:"positiveTTL,omitempty"` + + // negativeTTL is optional and specifies the amount of time that a negative response should be cached. + // + // If configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This + // field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, + // e.g. "100s", "1m30s", "12h30m10s". Values that are fractions of a second are rounded down to the nearest second. + // If the configured value is less than 1s, the default value will be used. + // If not configured, the value will be 0s and OpenShift will use a default value of 30 seconds unless noted + // otherwise in the respective Corefile for your version of OpenShift. The default value of 30 seconds is subject + // to change. + // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ + // +kubebuilder:validation:Type:=string + // +optional + NegativeTTL metav1.Duration `json:"negativeTTL,omitempty"` +} + +// +kubebuilder:validation:Enum:=Normal;Debug;Trace +type DNSLogLevel string + +var ( + // Normal is the default. Normal, working log information, everything is fine, but helpful notices for auditing or common operations. In kube, this is probably glog=2. + DNSLogLevelNormal DNSLogLevel = "Normal" + + // Debug is used when something went wrong. Even common operations may be logged, and less helpful but more quantity of notices. In kube, this is probably glog=4. + DNSLogLevelDebug DNSLogLevel = "Debug" + + // Trace is used when something went really badly and even more verbose logs are needed. Logging every function call as part of a common operation, to tracing execution of a query. In kube, this is probably glog=6. + DNSLogLevelTrace DNSLogLevel = "Trace" +) + +// Server defines the schema for a server that runs per instance of CoreDNS. +type Server struct { + // name is required and specifies a unique name for the server. Name must comply + // with the Service Name Syntax of rfc6335. + Name string `json:"name"` + // zones is required and specifies the subdomains that Server is authoritative for. + // Zones must conform to the rfc1123 definition of a subdomain. Specifying the + // cluster domain (i.e., "cluster.local") is invalid. + Zones []string `json:"zones"` + // forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages + // to upstream resolvers. + ForwardPlugin ForwardPlugin `json:"forwardPlugin"` +} + +// DNSTransport indicates what type of connection should be used. +// +kubebuilder:validation:Enum=TLS;Cleartext;"" +type DNSTransport string + +const ( + // TLSTransport indicates that TLS should be used for the connection. + TLSTransport DNSTransport = "TLS" + + // CleartextTransport indicates that no encryption should be used for + // the connection. + CleartextTransport DNSTransport = "Cleartext" +) + +// DNSTransportConfig groups related configuration parameters used for configuring +// forwarding to upstream resolvers that support DNS-over-TLS. +// +union +type DNSTransportConfig struct { + // transport allows cluster administrators to opt-in to using a DNS-over-TLS + // connection between cluster DNS and an upstream resolver(s). 
Configuring + // TLS as the transport at this level without configuring a CABundle will + // result in the system certificates being used to verify the serving + // certificate of the upstream resolver(s). + // + // Possible values: + // "" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject + // to change over time. The current default is "Cleartext". + // "Cleartext" - Cluster admin specified cleartext option. This results in the same functionality + // as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, + // or wants to switch from "TLS" to "Cleartext" explicitly. + // "TLS" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, + // you MUST also set ServerName. If a port is not included with the upstream IP, port 853 will be tried by default + // per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1. + // + // +optional + // +unionDiscriminator + Transport DNSTransport `json:"transport,omitempty"` + + // tls contains the additional configuration options to use when Transport is set to "TLS". + TLS *DNSOverTLSConfig `json:"tls,omitempty"` +} + +// DNSOverTLSConfig describes optional DNSTransportConfig fields that should be captured. +type DNSOverTLSConfig struct { + // serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is + // set to "TLS". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the + // TLS certificate installed in the upstream resolver(s). + // + // + --- + // + Inspired by the DNS1123 patterns in Kubernetes: https://github.com/kubernetes/kubernetes/blob/7c46f40bdf89a437ecdbc01df45e235b5f6d9745/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L178-L218 + // +required + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + ServerName string `json:"serverName"` + + // caBundle references a ConfigMap that must contain either a single + // CA Certificate or a CA Bundle. This allows cluster administrators to provide their + // own CA or CA bundle for validating the certificate of upstream resolvers. + // + // 1. The configmap must contain a `ca-bundle.crt` key. + // 2. The value must be a PEM encoded CA certificate or CA bundle. + // 3. The administrator must create this configmap in the openshift-config namespace. + // 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName. + // + // +optional + CABundle v1.ConfigMapNameReference `json:"caBundle,omitempty"` +} + +// ForwardingPolicy is the policy to use when forwarding DNS requests. +// +kubebuilder:validation:Enum=Random;RoundRobin;Sequential +type ForwardingPolicy string + +const ( + // RandomForwardingPolicy picks a random upstream server for each query. + RandomForwardingPolicy ForwardingPolicy = "Random" + + // RoundRobinForwardingPolicy picks upstream servers in a round-robin order, moving to the next server for each new query. + RoundRobinForwardingPolicy ForwardingPolicy = "RoundRobin" + + // SequentialForwardingPolicy tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. 
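+ // (For example, with upstreams "10.0.0.1" and "10.0.0.2", every query goes to
+ // "10.0.0.1" first; "10.0.0.2" is tried only if "10.0.0.1" fails to respond.)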
+ SequentialForwardingPolicy ForwardingPolicy = "Sequential"
+)
+
+// ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.
+type ForwardPlugin struct {
+ // upstreams is a list of resolvers to forward name queries for subdomains of Zones.
+ // Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream
+ // returns an error during the exchange, another resolver is tried from Upstreams. The
+ // Upstreams are selected in the order specified in Policy. Each upstream is represented
+ // by an IP address or IP:port if the upstream listens on a port other than 53.
+ //
+ // A maximum of 15 upstreams is allowed per ForwardPlugin.
+ //
+ // +kubebuilder:validation:MaxItems=15
+ Upstreams []string `json:"upstreams"`
+
+ // policy is used to determine the order in which upstream servers are selected for querying.
+ // Any one of the following values may be specified:
+ //
+ // * "Random" picks a random upstream server for each query.
+ // * "RoundRobin" picks upstream servers in a round-robin order, moving to the next server for each new query.
+ // * "Sequential" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.
+ //
+ // The default value is "Random"
+ //
+ // +optional
+ // +kubebuilder:default:="Random"
+ Policy ForwardingPolicy `json:"policy,omitempty"`
+
+ // transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use
+ // when forwarding DNS requests to an upstream resolver.
+ //
+ // The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS
+ // requests to an upstream resolver.
+ //
+ // +optional
+ TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"`
+
+ // protocolStrategy specifies the protocol to use for upstream DNS
+ // requests.
+ // Valid values for protocolStrategy are "TCP" and omitted.
+ // When omitted, this means no opinion and the platform is left to choose
+ // a reasonable default, which is subject to change over time.
+ // The current default is to use the protocol of the original client request.
+ // "TCP" specifies that the platform should use TCP for all upstream DNS requests,
+ // even if the client request uses UDP.
+ // "TCP" is useful for UDP-specific issues such as those created by
+ // non-compliant upstream resolvers, but may consume more bandwidth or
+ // increase DNS response time. Note that protocolStrategy only affects
+ // the protocol of DNS requests that CoreDNS makes to upstream resolvers.
+ // It does not affect the protocol of DNS requests between clients and
+ // CoreDNS.
+ //
+ // +optional
+ ProtocolStrategy ProtocolStrategy `json:"protocolStrategy"`
+}
+
+// UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the
+// specific case of the default (".") server.
+// It differs from ForwardPlugin in the default values it accepts:
+// * At least one upstream should be specified.
+// * The default policy is Sequential.
+type UpstreamResolvers struct {
+ // upstreams is a list of resolvers to forward name queries for the "." domain.
+ // Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream
+ // returns an error during the exchange, another resolver is tried from Upstreams. The
+ // Upstreams are selected in the order specified in Policy.
+ //
+ // A maximum of 15 upstreams is allowed per ForwardPlugin.
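+ // For example (illustrative): upstreams may mix one entry of type
+ // SystemResolvConf with entries of type Network such as "1.1.1.1" and
+ // "10.0.0.2" with port 5353.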
+ // If no Upstreams are specified, /etc/resolv.conf is used by default.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=15
+ // +kubebuilder:default={{"type":"SystemResolvConf"}}
+ Upstreams []Upstream `json:"upstreams"`
+
+ // policy is used to determine the order in which upstream servers are selected for querying.
+ // Any one of the following values may be specified:
+ //
+ // * "Random" picks a random upstream server for each query.
+ // * "RoundRobin" picks upstream servers in a round-robin order, moving to the next server for each new query.
+ // * "Sequential" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.
+ //
+ // The default value is "Sequential"
+ //
+ // +optional
+ // +kubebuilder:default="Sequential"
+ Policy ForwardingPolicy `json:"policy,omitempty"`
+
+ // transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use
+ // when forwarding DNS requests to an upstream resolver.
+ //
+ // The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS
+ // requests to an upstream resolver.
+ //
+ // +optional
+ TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"`
+
+ // protocolStrategy specifies the protocol to use for upstream DNS
+ // requests.
+ // Valid values for protocolStrategy are "TCP" and omitted.
+ // When omitted, this means no opinion and the platform is left to choose
+ // a reasonable default, which is subject to change over time.
+ // The current default is to use the protocol of the original client request.
+ // "TCP" specifies that the platform should use TCP for all upstream DNS requests,
+ // even if the client request uses UDP.
+ // "TCP" is useful for UDP-specific issues such as those created by
+ // non-compliant upstream resolvers, but may consume more bandwidth or
+ // increase DNS response time. Note that protocolStrategy only affects
+ // the protocol of DNS requests that CoreDNS makes to upstream resolvers.
+ // It does not affect the protocol of DNS requests between clients and
+ // CoreDNS.
+ //
+ // +optional
+ ProtocolStrategy ProtocolStrategy `json:"protocolStrategy"`
+}
+
+// Upstream can either be of type SystemResolvConf, or of type Network.
+//
+// - For an Upstream of type SystemResolvConf, no further fields are necessary:
+//   The upstream will be configured to use /etc/resolv.conf.
+// - For an Upstream of type Network, a NetworkResolver field needs to be defined
+//   with an IP address or IP:port if the upstream listens on a port other than 53.
+type Upstream struct {
+
+ // type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf.
+ // Type accepts 2 possible values: SystemResolvConf or Network.
+ //
+ // * When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:
+ //   /etc/resolv.conf will be used
+ // * When Network is used, the Upstream structure must contain at least an Address
+ //
+ // +required
+ Type UpstreamType `json:"type"`
+
+ // address must be defined when Type is set to Network. It will be ignored otherwise.
+ // It must be a valid IPv4 or IPv6 address.
+ //
+ // +optional
+ Address string `json:"address,omitempty"`
+
+ // port may be defined when Type is set to Network. It will be ignored otherwise.
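+ // For example (illustrative): type: Network with address: "1.1.1.1" and
+ // port: 5353 forwards queries to 1.1.1.1:5353.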
+ // Port must be between 1 and 65535.
+ //
+ // +optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:default=53
+ Port uint32 `json:"port,omitempty"`
+}
+
+// +kubebuilder:validation:Enum=SystemResolvConf;Network;""
+type UpstreamType string
+
+const (
+ SystemResolveConfType UpstreamType = "SystemResolvConf"
+ NetworkResolverType UpstreamType = "Network"
+)
+
+// ProtocolStrategy is a preference for the protocol to use for DNS queries.
+// + ---
+// + When consumers observe an unknown value, they should use the default strategy.
+// +kubebuilder:validation:Enum:=TCP;""
+type ProtocolStrategy string
+
+var (
+ // ProtocolStrategyDefault specifies no opinion for DNS protocol.
+ // If empty, the default behavior of CoreDNS is used. Currently, this means that CoreDNS uses the protocol of the
+ // originating client request as the upstream protocol.
+ // Note that the default behavior of CoreDNS is subject to change.
+ ProtocolStrategyDefault ProtocolStrategy = ""
+
+ // ProtocolStrategyTCP instructs CoreDNS to always use TCP, regardless of the originating client's request protocol.
+ ProtocolStrategyTCP ProtocolStrategy = "TCP"
+)
+
+// DNSNodePlacement describes the node scheduling configuration for DNS pods.
+type DNSNodePlacement struct {
+ // nodeSelector is the node selector applied to DNS pods.
+ //
+ // If empty, the default is used, which is currently the following:
+ //
+ //   kubernetes.io/os: linux
+ //
+ // This default is subject to change.
+ //
+ // If set, the specified selector is used and replaces the default.
+ //
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // tolerations is a list of tolerations applied to DNS pods.
+ //
+ // If empty, the DNS operator sets a toleration for the
+ // "node-role.kubernetes.io/master" taint. This default is subject to
+ // change. Specifying tolerations without including a toleration for
+ // the "node-role.kubernetes.io/master" taint may be risky as it could
+ // lead to an outage if all worker nodes become unavailable.
+ //
+ // Note that the daemon controller adds some tolerations as well. See
+ // https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ //
+ // +optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+const (
+ // Available indicates the DNS controller daemonset is available.
+ DNSAvailable = "Available"
+)
+
+// DNSStatus defines the observed status of the DNS.
+type DNSStatus struct {
+ // clusterIP is the service IP through which this DNS is made available.
+ //
+ // In the case of the default DNS, this will be a well known IP that is used
+ // as the default nameserver for pods that are using the default ClusterFirst DNS policy.
+ //
+ // In general, this IP can be specified in a pod's spec.dnsConfig.nameservers list
+ // or used explicitly when performing name resolution from within the cluster.
+ // Example: dig foo.com @<service IP>
+ //
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ //
+ // +required
+ ClusterIP string `json:"clusterIP"`
+
+ // clusterDomain is the local cluster DNS domain suffix for DNS services.
+ // This will be a subdomain as defined in RFC 1034,
+ // section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5
+ // Example: "cluster.local"
+ //
+ // More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service
+ //
+ // +required
+ ClusterDomain string `json:"clusterDomain"`
+
+ // conditions provide information about the state of the DNS on the cluster.
+ //
+ // These are the supported DNS conditions:
+ //
+ //   * Available
+ //   - True if the following conditions are met:
+ //     * DNS controller daemonset is available.
+ //   - False if any of those conditions are unsatisfied.
+ //
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []OperatorCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DNSList contains a list of DNS
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DNSList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []DNS `json:"items"`
+}
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_etcd.go
new file mode 100644
index 000000000..252f3b399
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_etcd.go
@@ -0,0 +1,96 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=etcds,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/752
+// +openshift:file-pattern=cvoRunLevel=0000_12,operatorName=etcd,operatorOrdering=01
+
+// Etcd provides information to configure an operator to manage etcd.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Etcd struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +required
+ Spec EtcdSpec `json:"spec"`
+ // +optional
+ Status EtcdStatus `json:"status"`
+}
+
+type EtcdSpec struct {
+ StaticPodOperatorSpec `json:",inline"`
+ // HardwareSpeed allows the user to change the etcd tuning profile, which configures
+ // the latency parameters for heartbeat interval and leader election timeouts,
+ // allowing the cluster to tolerate longer round-trip times between etcd members.
+ // Valid values are "", "Standard" and "Slower".
+ // "" means no opinion and the platform is left to choose a reasonable default
+ // which is subject to change without notice.
+ // +optional
+ HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"`
+
+ // backendQuotaGiB sets the etcd backend storage size limit in gibibytes.
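+ // For example (illustrative): backendQuotaGiB: 16 raises the quota to 16 GiB;
+ // note that the value may only be increased, never decreased.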
+ // The value should be an integer not less than 8 and not more than 32.
+ // When not specified, the default value is 8.
+ // +kubebuilder:default:=8
+ // +kubebuilder:validation:Minimum=8
+ // +kubebuilder:validation:Maximum=32
+ // +kubebuilder:validation:XValidation:rule="self>=oldSelf",message="etcd backendQuotaGiB may not be decreased"
+ // +openshift:enable:FeatureGate=EtcdBackendQuota
+ // +default=8
+ // +optional
+ BackendQuotaGiB int32 `json:"backendQuotaGiB,omitempty"`
+}
+
+type EtcdStatus struct {
+ StaticPodOperatorStatus `json:",inline"`
+ // +optional
+ HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"`
+}
+
+const (
+ // StandardHardwareSpeed provides the normal tolerances for hardware speed and latency.
+ // Currently sets (values subject to change at any time):
+ //   ETCD_HEARTBEAT_INTERVAL: 100ms
+ //   ETCD_LEADER_ELECTION_TIMEOUT: 1000ms
+ StandardHardwareSpeed ControlPlaneHardwareSpeed = "Standard"
+ // SlowerHardwareSpeed provides more tolerance for slower hardware and/or higher latency networks.
+ // Sets (values subject to change):
+ //   ETCD_HEARTBEAT_INTERVAL: 5x Standard
+ //   ETCD_LEADER_ELECTION_TIMEOUT: 2.5x Standard
+ SlowerHardwareSpeed ControlPlaneHardwareSpeed = "Slower"
+)
+
+// ControlPlaneHardwareSpeed declares valid hardware speed tolerance levels
+// +enum
+// +kubebuilder:validation:Enum:="";Standard;Slower
+type ControlPlaneHardwareSpeed string
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EtcdList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type EtcdList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // items contains the items
+ Items []Etcd `json:"items"`
+}
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_ingress.go
new file mode 100644
index 000000000..35b50a8fb
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_ingress.go
@@ -0,0 +1,2128 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ corev1 "k8s.io/api/core/v1"
+
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.availableReplicas,selectorpath=.status.selector
+// +kubebuilder:resource:path=ingresscontrollers,scope=Namespaced
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/616
+// +openshift:capability=Ingress
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=ingress,operatorOrdering=00
+
+// IngressController describes a managed ingress controller for the cluster. The
+// controller can service OpenShift Route and Kubernetes Ingress resources.
+//
+// When an IngressController is created, a new ingress controller deployment is
+// created to allow external traffic to reach the services that expose Ingress
+// or Route resources. Updating this resource may lead to disruption for
+// public-facing network connections as a new ingress controller revision may
+// be rolled out.
+//
+// https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
+//
+// Whenever possible, sensible defaults for the platform are used. See each
+// field for more details.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type IngressController struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec is the specification of the desired behavior of the IngressController.
+ Spec IngressControllerSpec `json:"spec,omitempty"`
+ // status is the most recently observed status of the IngressController.
+ Status IngressControllerStatus `json:"status,omitempty"`
+}
+
+// IngressControllerSpec is the specification of the desired behavior of the
+// IngressController.
+type IngressControllerSpec struct {
+ // domain is a DNS name serviced by the ingress controller and is used to
+ // configure multiple features:
+ //
+ // * For the LoadBalancerService endpoint publishing strategy, domain is
+ //   used to configure DNS records. See endpointPublishingStrategy.
+ //
+ // * When using a generated default certificate, the certificate will be valid
+ //   for domain and its subdomains. See defaultCertificate.
+ //
+ // * The value is published to individual Route statuses so that end-users
+ //   know where to target external DNS records.
+ //
+ // domain must be unique among all IngressControllers, and cannot be
+ // updated.
+ //
+ // If empty, defaults to ingress.config.openshift.io/cluster .spec.domain.
+ //
+ // +optional
+ Domain string `json:"domain,omitempty"`
+
+ // httpErrorCodePages specifies a configmap with custom error pages.
+ // The administrator must create this configmap in the openshift-config namespace.
+ // This configmap should have keys in the format "error-page-<error code>.http",
+ // where <error code> is an HTTP error code.
+ // For example, "error-page-503.http" defines an error page for HTTP 503 responses.
+ // Currently only error pages for 503 and 404 responses can be customized.
+ // Each value in the configmap should be the full response, including HTTP headers.
+ // E.g. https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http
+ // If this field is empty, the ingress controller uses the default error pages.
+ HttpErrorCodePages configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"`
+
+ // replicas is the desired number of ingress controller replicas. If unset,
+ // the default depends on the value of the defaultPlacement field in the
+ // cluster config.openshift.io/v1/ingresses status.
+ //
+ // The value of replicas is set based on the value of a chosen field in the
+ // Infrastructure CR. If defaultPlacement is set to ControlPlane, the
+ // chosen field will be controlPlaneTopology. If it is set to Workers, the
+ // chosen field will be infrastructureTopology. Replicas will then be set to 1
+ // or 2 based on whether the chosen field's value is SingleReplica or
+ // HighlyAvailable, respectively.
+ //
+ // These defaults are subject to change.
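+ // For example (illustrative): on a cluster where defaultPlacement is Workers
+ // and infrastructureTopology is HighlyAvailable, leaving replicas unset
+ // results in 2 replicas.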
+ // + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // endpointPublishingStrategy is used to publish the ingress controller + // endpoints to other networks, enable load balancer integrations, etc. + // + // If unset, the default is based on + // infrastructure.config.openshift.io/cluster .status.platform: + // + // AWS: LoadBalancerService (with External scope) + // Azure: LoadBalancerService (with External scope) + // GCP: LoadBalancerService (with External scope) + // IBMCloud: LoadBalancerService (with External scope) + // AlibabaCloud: LoadBalancerService (with External scope) + // Libvirt: HostNetwork + // + // Any other platform types (including None) default to HostNetwork. + // + // endpointPublishingStrategy cannot be updated. + // + // +optional + EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"` + + // defaultCertificate is a reference to a secret containing the default + // certificate served by the ingress controller. When Routes don't specify + // their own certificate, defaultCertificate is used. + // + // The secret must contain the following keys and data: + // + // tls.crt: certificate file contents + // tls.key: key file contents + // + // If unset, a wildcard certificate is automatically generated and used. The + // certificate is valid for the ingress controller domain (and subdomains) and + // the generated certificate's CA will be automatically integrated with the + // cluster's trust store. + // + // If a wildcard certificate is used and shared by multiple + // HTTP/2 enabled routes (which implies ALPN) then clients + // (i.e., notably browsers) are at liberty to reuse open + // connections. This means a client can reuse a connection to + // another route and that is likely to fail. This behaviour is + // generally known as connection coalescing. + // + // The in-use certificate (whether generated or user-specified) will be + // automatically integrated with OpenShift's built-in OAuth server. + // + // +optional + DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` + + // namespaceSelector is used to filter the set of namespaces serviced by the + // ingress controller. This is useful for implementing shards. + // + // If unset, the default is no filtering. + // + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // routeSelector is used to filter the set of Routes serviced by the ingress + // controller. This is useful for implementing shards. + // + // If unset, the default is no filtering. + // + // +optional + RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"` + + // nodePlacement enables explicit control over the scheduling of the ingress + // controller. + // + // If unset, defaults are used. See NodePlacement for more details. + // + // +optional + NodePlacement *NodePlacement `json:"nodePlacement,omitempty"` + + // tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. + // + // If unset, the default is based on the apiservers.config.openshift.io/cluster resource. + // + // Note that when using the Old, Intermediate, and Modern profile types, the effective + // profile configuration is subject to change between releases. For example, given + // a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade + // to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress + // controller, resulting in a rollout. 
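+ // For example (an illustrative note, assuming the Custom profile type in
+ // config/v1's TLSSecurityProfile): pinning ciphers and a minimum TLS version
+ // through a Custom profile avoids such release-to-release drift, at the cost
+ // of maintaining the cipher list manually.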
+ // + // +optional + TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + + // clientTLS specifies settings for requesting and verifying client + // certificates, which can be used to enable mutual TLS for + // edge-terminated and reencrypt routes. + // + // +optional + ClientTLS ClientTLS `json:"clientTLS"` + + // routeAdmission defines a policy for handling new route claims (for example, + // to allow or deny claims across namespaces). + // + // If empty, defaults will be applied. See specific routeAdmission fields + // for details about their defaults. + // + // +optional + RouteAdmission *RouteAdmissionPolicy `json:"routeAdmission,omitempty"` + + // logging defines parameters for what should be logged where. If this + // field is empty, operational logs are enabled but access logs are + // disabled. + // + // +optional + Logging *IngressControllerLogging `json:"logging,omitempty"` + + // httpHeaders defines policy for HTTP headers. + // + // If this field is empty, the default values are used. + // + // +optional + HTTPHeaders *IngressControllerHTTPHeaders `json:"httpHeaders,omitempty"` + + // httpEmptyRequestsPolicy describes how HTTP connections should be + // handled if the connection times out before a request is received. + // Allowed values for this field are "Respond" and "Ignore". If the + // field is set to "Respond", the ingress controller sends an HTTP 400 + // or 408 response, logs the connection (if access logging is enabled), + // and counts the connection in the appropriate metrics. If the field + // is set to "Ignore", the ingress controller closes the connection + // without sending a response, logging the connection, or incrementing + // metrics. The default value is "Respond". + // + // Typically, these connections come from load balancers' health probes + // or Web browsers' speculative connections ("preconnect") and can be + // safely ignored. However, these requests may also be caused by + // network errors, and so setting this field to "Ignore" may impede + // detection and diagnosis of problems. In addition, these requests may + // be caused by port scans, in which case logging empty requests may aid + // in detecting intrusion attempts. + // + // +optional + // +kubebuilder:default:="Respond" + HTTPEmptyRequestsPolicy HTTPEmptyRequestsPolicy `json:"httpEmptyRequestsPolicy,omitempty"` + + // tuningOptions defines parameters for adjusting the performance of + // ingress controller pods. All fields are optional and will use their + // respective defaults if not set. See specific tuningOptions fields for + // more details. + // + // Setting fields within tuningOptions is generally not recommended. The + // default values are suitable for most configurations. + // + // +optional + TuningOptions IngressControllerTuningOptions `json:"tuningOptions,omitempty"` + + // unsupportedConfigOverrides allows specifying unsupported + // configuration options. Its use is unsupported. + // + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"` + + // httpCompression defines a policy for HTTP traffic compression. + // By default, there is no HTTP compression. 
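+ // For example (illustrative): setting httpCompression.mimeTypes to
+ // ["text/html", "text/css; charset=utf-8"] enables compression for only
+ // those MIME types.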
+ // + // +optional + HTTPCompression HTTPCompressionPolicy `json:"httpCompression,omitempty"` + + // idleConnectionTerminationPolicy maps directly to HAProxy's + // idle-close-on-response option and controls whether HAProxy + // keeps idle frontend connections open during a soft stop + // (router reload). + // + // Allowed values for this field are "Immediate" and + // "Deferred". The default value is "Immediate". + // + // When set to "Immediate", idle connections are closed + // immediately during router reloads. This ensures immediate + // propagation of route changes but may impact clients + // sensitive to connection resets. + // + // When set to "Deferred", HAProxy will maintain idle + // connections during a soft reload instead of closing them + // immediately. These connections remain open until any of the + // following occurs: + // + // - A new request is received on the connection, in which + // case HAProxy handles it in the old process and closes + // the connection after sending the response. + // + // - HAProxy's `timeout http-keep-alive` duration expires + // (300 seconds in OpenShift's configuration, not + // configurable). + // + // - The client's keep-alive timeout expires, causing the + // client to close the connection. + // + // Setting Deferred can help prevent errors in clients or load + // balancers that do not properly handle connection resets. + // Additionally, this option allows you to retain the pre-2.4 + // HAProxy behaviour: in HAProxy version 2.2 (OpenShift + // versions < 4.14), maintaining idle connections during a + // soft reload was the default behaviour, but starting with + // HAProxy 2.4, the default changed to closing idle + // connections immediately. + // + // Important Consideration: + // + // - Using Deferred will result in temporary inconsistencies + // for the first request on each persistent connection + // after a route update and router reload. This request + // will be processed by the old HAProxy process using its + // old configuration. Subsequent requests will use the + // updated configuration. + // + // Operational Considerations: + // + // - Keeping idle connections open during reloads may lead + // to an accumulation of old HAProxy processes if + // connections remain idle for extended periods, + // especially in environments where frequent reloads + // occur. + // + // - Consider monitoring the number of HAProxy processes in + // the router pods when Deferred is set. + // + // - You may need to enable or adjust the + // `ingress.operator.openshift.io/hard-stop-after` + // duration (configured via an annotation on the + // IngressController resource) in environments with + // frequent reloads to prevent resource exhaustion. + // + // +optional + // +kubebuilder:default:="Immediate" + // +default="Immediate" + IdleConnectionTerminationPolicy IngressControllerConnectionTerminationPolicy `json:"idleConnectionTerminationPolicy,omitempty"` +} + +// httpCompressionPolicy turns on compression for the specified MIME types. +// +// This field is optional, and its absence implies that compression should not be enabled +// globally in HAProxy. +// +// If httpCompressionPolicy exists, compression should be enabled only for the specified +// MIME types. +type HTTPCompressionPolicy struct { + // mimeTypes is a list of MIME types that should have compression applied. + // This list can be empty, in which case the ingress controller does not apply compression. 
+ // + // Note: Not all MIME types benefit from compression, but HAProxy will still use resources + // to try to compress if instructed to. Generally speaking, text (html, css, js, etc.) + // formats benefit from compression, but formats that are already compressed (image, + // audio, video, etc.) benefit little in exchange for the time and cpu spent on compressing + // again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2 + // + // +listType=set + MimeTypes []CompressionMIMEType `json:"mimeTypes,omitempty"` +} + +// CompressionMIMEType defines the format of a single MIME type. +// E.g. "text/css; charset=utf-8", "text/html", "text/*", "image/svg+xml", +// "application/octet-stream", "X-custom/customsub", etc. +// +// The format should follow the Content-Type definition in RFC 1341: +// Content-Type := type "/" subtype *[";" parameter] +// - The type in Content-Type can be one of: +// application, audio, image, message, multipart, text, video, or a custom +// type preceded by "X-" and followed by a token as defined below. +// - The token is a string of at least one character, and not containing white +// space, control characters, or any of the characters in the tspecials set. +// - The tspecials set contains the characters ()<>@,;:\"/[]?.= +// - The subtype in Content-Type is also a token. +// - The optional parameter/s following the subtype are defined as: +// token "=" (token / quoted-string) +// - The quoted-string, as defined in RFC 822, is surrounded by double quotes +// and can contain white space plus any character EXCEPT \, ", and CR. +// It can also contain any single ASCII character as long as it is escaped by \. +// +// +kubebuilder:validation:Pattern=`^(?i)(x-[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|application|audio|image|message|multipart|text|video)/[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+(; *[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+=([^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|"(\\[\x00-\x7F]|[^\x0D"\\])*"))*$` +type CompressionMIMEType string + +// NodePlacement describes node scheduling configuration for an ingress +// controller. +type NodePlacement struct { + // nodeSelector is the node selector applied to ingress controller + // deployments. + // + // If set, the specified selector is used and replaces the default. + // + // If unset, the default depends on the value of the defaultPlacement + // field in the cluster config.openshift.io/v1/ingresses status. + // + // When defaultPlacement is Workers, the default is: + // + // kubernetes.io/os: linux + // node-role.kubernetes.io/worker: '' + // + // When defaultPlacement is ControlPlane, the default is: + // + // kubernetes.io/os: linux + // node-role.kubernetes.io/master: '' + // + // These defaults are subject to change. + // + // Note that using nodeSelector.matchExpressions is not supported. Only + // nodeSelector.matchLabels may be used. This is a limitation of the + // Kubernetes API: the pod spec does not allow complex expressions for + // node selectors. + // + // +optional + NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` + + // tolerations is a list of tolerations applied to ingress controller + // deployments. + // + // The default is an empty list. + // + // See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + // + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// EndpointPublishingStrategyType is a way to publish ingress controller endpoints. 
+// +kubebuilder:validation:Enum=LoadBalancerService;HostNetwork;Private;NodePortService
+type EndpointPublishingStrategyType string
+
+const (
+ // LoadBalancerService publishes the ingress controller using a Kubernetes
+ // LoadBalancer Service.
+ LoadBalancerServiceStrategyType EndpointPublishingStrategyType = "LoadBalancerService"
+
+ // HostNetwork publishes the ingress controller on node ports where the
+ // ingress controller is deployed.
+ HostNetworkStrategyType EndpointPublishingStrategyType = "HostNetwork"
+
+ // Private does not publish the ingress controller.
+ PrivateStrategyType EndpointPublishingStrategyType = "Private"
+
+ // NodePortService publishes the ingress controller using a Kubernetes NodePort Service.
+ NodePortServiceStrategyType EndpointPublishingStrategyType = "NodePortService"
+)
+
+// LoadBalancerScope is the scope at which a load balancer is exposed.
+// +kubebuilder:validation:Enum=Internal;External
+type LoadBalancerScope string
+
+var (
+ // InternalLoadBalancer is a load balancer that is exposed only on the
+ // cluster's private network.
+ InternalLoadBalancer LoadBalancerScope = "Internal"
+
+ // ExternalLoadBalancer is a load balancer that is exposed on the
+ // cluster's public network (which is typically on the Internet).
+ ExternalLoadBalancer LoadBalancerScope = "External"
+)
+
+// CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8"
+// or "fd00::/8").
+// +kubebuilder:validation:Pattern=`(^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$)`
+// + ---
+// + The regex for the IPv4 CIDR range was taken from other CIDR fields in the OpenShift API
+// + and the one for the IPv6 CIDR range was taken from
+// + https://blog.markhatton.co.uk/2011/03/15/regular-expressions-for-ip-addresses-cidr-ranges-and-hostnames/
+// + The resulting regex is an OR of both regexes.
+type CIDR string
+
+// LoadBalancerStrategy holds parameters for a load balancer.
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=SetEIPForNLBIngressController,rule="!has(self.scope) || self.scope != 'Internal' || !has(self.providerParameters) || !has(self.providerParameters.aws) || !has(self.providerParameters.aws.networkLoadBalancer) || !has(self.providerParameters.aws.networkLoadBalancer.eipAllocations)",message="eipAllocations are forbidden when the scope is Internal."
+// +kubebuilder:validation:XValidation:rule=`!has(self.scope) || self.scope != 'Internal' || !has(self.providerParameters) || !has(self.providerParameters.openstack) || !has(self.providerParameters.openstack.floatingIP) || self.providerParameters.openstack.floatingIP == ""`,message="cannot specify a floating ip when scope is internal"
+type LoadBalancerStrategy struct {
+ // scope indicates the scope at which the load balancer is exposed.
+ // Possible values are "External" and "Internal".
+ //
+ // +required
+ Scope LoadBalancerScope `json:"scope"`
+
+ // allowedSourceRanges specifies an allowlist of IP address ranges to which
+ // access to the load balancer should be restricted. Each range must be
+ // specified using CIDR notation (e.g. "10.0.0.0/8" or "fd00::/8"). If no range is
+ // specified, "0.0.0.0/0" for IPv4 and "::/0" for IPv6 are used by default,
+ // which allows all source addresses.
+ //
+ // To facilitate migration from earlier versions of OpenShift that did
+ // not have the allowedSourceRanges field, you may set the
+ // service.beta.kubernetes.io/load-balancer-source-ranges annotation on
+ // the "router-<ingresscontroller name>" service in the
+ // "openshift-ingress" namespace, and this annotation will take
+ // effect if allowedSourceRanges is empty on OpenShift 4.12.
+ //
+ // +nullable
+ // +optional
+ // +listType=atomic
+ AllowedSourceRanges []CIDR `json:"allowedSourceRanges,omitempty"`
+
+ // providerParameters holds desired load balancer information specific to
+ // the underlying infrastructure provider.
+ //
+ // If empty, defaults will be applied. See specific providerParameters
+ // fields for details about their defaults.
+ //
+ // +optional
+ ProviderParameters *ProviderLoadBalancerParameters `json:"providerParameters,omitempty"`
+
+ // dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record
+ // associated with the load balancer service will be managed by
+ // the ingress operator. It defaults to Managed.
+ // Valid values are: Managed and Unmanaged.
+ //
+ // +kubebuilder:default:="Managed"
+ // +required
+ // +default="Managed"
+ DNSManagementPolicy LoadBalancerDNSManagementPolicy `json:"dnsManagementPolicy,omitempty"`
+}
+
+// LoadBalancerDNSManagementPolicy is a policy for configuring how
+// ingresscontrollers manage DNS.
+//
+// +kubebuilder:validation:Enum=Managed;Unmanaged
+type LoadBalancerDNSManagementPolicy string
+
+const (
+ // ManagedLoadBalancerDNS specifies that the operator manages
+ // a wildcard DNS record for the ingresscontroller.
+ ManagedLoadBalancerDNS LoadBalancerDNSManagementPolicy = "Managed"
+ // UnmanagedLoadBalancerDNS specifies that the operator does not manage
+ // any wildcard DNS record for the ingresscontroller.
+ UnmanagedLoadBalancerDNS LoadBalancerDNSManagementPolicy = "Unmanaged"
+)
+
+// ProviderLoadBalancerParameters holds desired load balancer information
+// specific to the underlying infrastructure provider.
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'OpenStack' ? true : !has(self.openstack)",message="openstack is not permitted when type is not OpenStack"
+// +union
+type ProviderLoadBalancerParameters struct {
+ // type is the underlying infrastructure provider for the load balancer.
+ // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix",
+ // "OpenStack", and "VSphere".
+ //
+ // +unionDiscriminator
+ // +required
+ Type LoadBalancerProviderType `json:"type"`
+
+ // aws provides configuration settings that are specific to AWS
+ // load balancers.
+ // + // If empty, defaults will be applied. See specific aws fields for + // details about their defaults. + // + // +optional + AWS *AWSLoadBalancerParameters `json:"aws,omitempty"` + + // gcp provides configuration settings that are specific to GCP + // load balancers. + // + // If empty, defaults will be applied. See specific gcp fields for + // details about their defaults. + // + // +optional + GCP *GCPLoadBalancerParameters `json:"gcp,omitempty"` + + // ibm provides configuration settings that are specific to IBM Cloud + // load balancers. + // + // If empty, defaults will be applied. See specific ibm fields for + // details about their defaults. + // + // +optional + IBM *IBMLoadBalancerParameters `json:"ibm,omitempty"` + + // openstack provides configuration settings that are specific to OpenStack + // load balancers. + // + // If empty, defaults will be applied. See specific openstack fields for + // details about their defaults. + // + // +optional + OpenStack *OpenStackLoadBalancerParameters `json:"openstack,omitempty"` +} + +// LoadBalancerProviderType is the underlying infrastructure provider for the +// load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", +// "OpenStack", and "VSphere". +// +// +kubebuilder:validation:Enum=AWS;Azure;BareMetal;GCP;Nutanix;OpenStack;VSphere;IBM +type LoadBalancerProviderType string + +const ( + AWSLoadBalancerProvider LoadBalancerProviderType = "AWS" + AzureLoadBalancerProvider LoadBalancerProviderType = "Azure" + GCPLoadBalancerProvider LoadBalancerProviderType = "GCP" + OpenStackLoadBalancerProvider LoadBalancerProviderType = "OpenStack" + VSphereLoadBalancerProvider LoadBalancerProviderType = "VSphere" + IBMLoadBalancerProvider LoadBalancerProviderType = "IBM" + BareMetalLoadBalancerProvider LoadBalancerProviderType = "BareMetal" + AlibabaCloudLoadBalancerProvider LoadBalancerProviderType = "AlibabaCloud" + NutanixLoadBalancerProvider LoadBalancerProviderType = "Nutanix" +) + +// AWSLoadBalancerParameters provides configuration settings that are +// specific to AWS load balancers. +// +union +type AWSLoadBalancerParameters struct { + // type is the type of AWS load balancer to instantiate for an ingresscontroller. + // + // Valid values are: + // + // * "Classic": A Classic Load Balancer that makes routing decisions at either + // the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See + // the following for additional details: + // + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + // + // * "NLB": A Network Load Balancer that makes routing decisions at the + // transport layer (TCP/SSL). See the following for additional details: + // + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb + // + // +unionDiscriminator + // +required + Type AWSLoadBalancerType `json:"type"` + + // classicLoadBalancerParameters holds configuration parameters for an AWS + // classic load balancer. Present only if type is Classic. + // + // +optional + ClassicLoadBalancerParameters *AWSClassicLoadBalancerParameters `json:"classicLoadBalancer,omitempty"` + + // networkLoadBalancerParameters holds configuration parameters for an AWS + // network load balancer. Present only if type is NLB. + // + // +optional + NetworkLoadBalancerParameters *AWSNetworkLoadBalancerParameters `json:"networkLoadBalancer,omitempty"` +} + +// AWSLoadBalancerType is the type of AWS load balancer to instantiate. 
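+// For example, selecting a Classic Load Balancer and accepting its defaults
+// is just (a minimal sketch; the variable name is illustrative):
+//
+//	aws := AWSLoadBalancerParameters{
+//		Type: AWSClassicLoadBalancer,
+//		// classicLoadBalancer may be omitted, in which case defaults apply.
+//	}
+//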
+// +kubebuilder:validation:Enum=Classic;NLB +type AWSLoadBalancerType string + +const ( + AWSClassicLoadBalancer AWSLoadBalancerType = "Classic" + AWSNetworkLoadBalancer AWSLoadBalancerType = "NLB" +) + +// AWSSubnets contains a list of references to AWS subnets by +// ID or name. +// +kubebuilder:validation:XValidation:rule=`has(self.ids) && has(self.names) ? size(self.ids + self.names) <= 10 : true`,message="the total number of subnets cannot exceed 10" +// +kubebuilder:validation:XValidation:rule=`has(self.ids) && self.ids.size() > 0 || has(self.names) && self.names.size() > 0`,message="must specify at least 1 subnet name or id" +type AWSSubnets struct { + // ids specifies a list of AWS subnets by subnet ID. + // Subnet IDs must start with "subnet-", consist only + // of alphanumeric characters, must be exactly 24 + // characters long, must be unique, and the total + // number of subnets specified by ids and names + // must not exceed 10. + // + // +optional + // +listType=atomic + // +kubebuilder:validation:XValidation:rule=`self.all(x, self.exists_one(y, x == y))`,message="subnet ids cannot contain duplicates" + // + Note: Though it may seem redundant, MaxItems is necessary to prevent exceeding of the cost budget for the validation rules. + // +kubebuilder:validation:MaxItems=10 + IDs []AWSSubnetID `json:"ids,omitempty"` + + // names specifies a list of AWS subnets by subnet name. + // Subnet names must not start with "subnet-", must not + // include commas, must be under 256 characters in length, + // must be unique, and the total number of subnets + // specified by ids and names must not exceed 10. + // + // +optional + // +listType=atomic + // +kubebuilder:validation:XValidation:rule=`self.all(x, self.exists_one(y, x == y))`,message="subnet names cannot contain duplicates" + // + Note: Though it may seem redundant, MaxItems is necessary to prevent exceeding of the cost budget for the validation rules. + // +kubebuilder:validation:MaxItems=10 + Names []AWSSubnetName `json:"names,omitempty"` +} + +// AWSSubnetID is a reference to an AWS subnet ID. +// +kubebuilder:validation:MinLength=24 +// +kubebuilder:validation:MaxLength=24 +// +kubebuilder:validation:Pattern=`^subnet-[0-9A-Za-z]+$` +type AWSSubnetID string + +// AWSSubnetName is a reference to an AWS subnet name. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=256 +// +kubebuilder:validation:XValidation:rule=`!self.contains(',')`,message="subnet name cannot contain a comma" +// +kubebuilder:validation:XValidation:rule=`!self.startsWith('subnet-')`,message="subnet name cannot start with 'subnet-'" +type AWSSubnetName string + +// GCPLoadBalancerParameters provides configuration settings that are +// specific to GCP load balancers. +type GCPLoadBalancerParameters struct { + // clientAccess describes how client access is restricted for internal + // load balancers. + // + // Valid values are: + // * "Global": Specifying an internal load balancer with Global client access + // allows clients from any region within the VPC to communicate with the load + // balancer. + // + // https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access + // + // * "Local": Specifying an internal load balancer with Local client access + // means only clients within the same region (and VPC) as the GCP load balancer + // can communicate with the load balancer. Note that this is the default behavior. 
+ // + // https://cloud.google.com/load-balancing/docs/internal#client_access + // + // +optional + ClientAccess GCPClientAccess `json:"clientAccess,omitempty"` +} + +// GCPClientAccess describes how client access is restricted for internal +// load balancers. +// +kubebuilder:validation:Enum=Global;Local +type GCPClientAccess string + +const ( + GCPGlobalAccess GCPClientAccess = "Global" + GCPLocalAccess GCPClientAccess = "Local" +) + +// IBMLoadBalancerParameters provides configuration settings that are +// specific to IBM Cloud load balancers. +type IBMLoadBalancerParameters struct { + // protocol specifies whether the load balancer uses PROXY protocol to forward connections to + // the IngressController. See "service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: + // "proxy-protocol"" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas" + // + // PROXY protocol can be used with load balancers that support it to + // communicate the source addresses of client connections when + // forwarding those connections to the IngressController. Using PROXY + // protocol enables the IngressController to report those source + // addresses instead of reporting the load balancer's address in HTTP + // headers and logs. Note that enabling PROXY protocol on the + // IngressController will cause connections to fail if you are not using + // a load balancer that uses PROXY protocol to forward connections to + // the IngressController. See + // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for + // information about PROXY protocol. + // + // Valid values for protocol are TCP, PROXY and omitted. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default is TCP, without the proxy protocol enabled. + // + // +optional + Protocol IngressControllerProtocol `json:"protocol,omitempty"` +} + +// OpenStackLoadBalancerParameters provides configuration settings that are +// specific to OpenStack load balancers. +type OpenStackLoadBalancerParameters struct { + // loadBalancerIP is tombstoned since the field was replaced by floatingIP. + // LoadBalancerIP string `json:"loadBalancerIP,omitempty"` + + // floatingIP specifies the IP address that the load balancer will use. + // When not specified, an IP address will be assigned randomly by the OpenStack cloud provider. + // When specified, the floating IP has to be pre-created. If the + // specified value is not a floating IP or is already claimed, the + // OpenStack cloud provider won't be able to provision the load + // balancer. + // This field may only be used if the IngressController has External scope. + // This value must be a valid IPv4 or IPv6 address. + // + --- + // + Note: this field is meant to be set by the ingress controller + // + to populate the `Service.Spec.LoadBalancerIP` field which has been + // + deprecated in Kubernetes: + // + https://github.com/kubernetes/kubernetes/pull/107235 + // + However, the field is still used by cloud-provider-openstack to reconcile + // + the floating IP that we attach to the external load balancer. + // + // +kubebuilder:validation:XValidation:rule="isIP(self)",message="floatingIP must be a valid IPv4 or IPv6 address" + // +optional + FloatingIP string `json:"floatingIP,omitempty"` +} + +// AWSClassicLoadBalancerParameters holds configuration parameters for an +// AWS Classic load balancer. 
+type AWSClassicLoadBalancerParameters struct {
+	// connectionIdleTimeout specifies the maximum time period that a
+	// connection may be idle before the load balancer closes the
+	// connection. The value must be parseable as a time duration value;
+	// see https://pkg.go.dev/time#ParseDuration. A nil or zero value
+	// means no opinion, in which case a default value is used. The default
+	// value for this field is 60s. This default is subject to change.
+	//
+	// +kubebuilder:validation:Format=duration
+	// +optional
+	ConnectionIdleTimeout metav1.Duration `json:"connectionIdleTimeout,omitempty"`
+
+	// subnets specifies the subnets to which the load balancer will
+	// attach. The subnets may be specified by either their
+	// ID or name. The total number of subnets is limited to 10.
+	//
+	// In order for the load balancer to be provisioned with subnets,
+	// each subnet must exist, each subnet must be from a different
+	// availability zone, and the load balancer service must be
+	// recreated to pick up new values.
+	//
+	// When omitted from the spec, the subnets will be auto-discovered
+	// for each availability zone. Auto-discovered subnets are not reported
+	// in the status of the IngressController object.
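+	//
+	// Putting the two fields above together, a Classic ELB with a longer
+	// idle timeout and a pinned subnet might look like this (a minimal
+	// sketch; the subnet ID is illustrative and the usual metav1/time
+	// imports are assumed):
+	//
+	//	classic := AWSClassicLoadBalancerParameters{
+	//		ConnectionIdleTimeout: metav1.Duration{Duration: 2 * time.Minute},
+	//		Subnets: &AWSSubnets{
+	//			IDs: []AWSSubnetID{"subnet-0123456789abcdef0"},
+	//		},
+	//	}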
+ // + // +optional + // +openshift:enable:FeatureGate=IngressControllerLBSubnetsAWS + Subnets *AWSSubnets `json:"subnets,omitempty"` + + // eipAllocations is a list of IDs for Elastic IP (EIP) addresses that + // are assigned to the Network Load Balancer. + // The following restrictions apply: + // + // eipAllocations can only be used with external scope, not internal. + // An EIP can be allocated to only a single IngressController. + // The number of EIP allocations must match the number of subnets that are used for the load balancer. + // Each EIP allocation must be unique. + // A maximum of 10 EIP allocations are permitted. + // + // See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html for general + // information about configuration, characteristics, and limitations of Elastic IP addresses. + // + // +openshift:enable:FeatureGate=SetEIPForNLBIngressController + // +optional + // +listType=atomic + // +kubebuilder:validation:XValidation:rule=`self.all(x, self.exists_one(y, x == y))`,message="eipAllocations cannot contain duplicates" + // +kubebuilder:validation:MaxItems=10 + EIPAllocations []EIPAllocation `json:"eipAllocations"` +} + +// EIPAllocation is an ID for an Elastic IP (EIP) address that can be allocated to an ELB in the AWS environment. +// Values must begin with `eipalloc-` followed by exactly 17 hexadecimal (`[0-9a-fA-F]`) characters. +// + Explanation of the regex `^eipalloc-[0-9a-fA-F]{17}$` for validating value of the EIPAllocation: +// + ^eipalloc- ensures the string starts with "eipalloc-". +// + [0-9a-fA-F]{17} matches exactly 17 hexadecimal characters (0-9, a-f, A-F). +// + $ ensures the string ends after the 17 hexadecimal characters. +// + Example of Valid and Invalid values: +// + eipalloc-1234567890abcdef1 is valid. +// + eipalloc-1234567890abcde is not valid (too short). +// + eipalloc-1234567890abcdefg is not valid (contains a non-hex character 'g'). +// + Max length is calculated as follows: +// + eipalloc- = 9 chars and 17 hexadecimal chars after `-` +// + So, total is 17 + 9 = 26 chars required for value of an EIPAllocation. +// +// +kubebuilder:validation:MinLength=26 +// +kubebuilder:validation:MaxLength=26 +// +kubebuilder:validation:XValidation:rule=`self.startsWith('eipalloc-')`,message="eipAllocations should start with 'eipalloc-'" +// +kubebuilder:validation:XValidation:rule=`self.split("-", 2)[1].matches('[0-9a-fA-F]{17}$')`,message="eipAllocations must be 'eipalloc-' followed by exactly 17 hexadecimal characters (0-9, a-f, A-F)" +type EIPAllocation string + +// HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing +// strategy. +type HostNetworkStrategy struct { + // protocol specifies whether the IngressController expects incoming + // connections to use plain TCP or whether the IngressController expects + // PROXY protocol. + // + // PROXY protocol can be used with load balancers that support it to + // communicate the source addresses of client connections when + // forwarding those connections to the IngressController. Using PROXY + // protocol enables the IngressController to report those source + // addresses instead of reporting the load balancer's address in HTTP + // headers and logs. Note that enabling PROXY protocol on the + // IngressController will cause connections to fail if you are not using + // a load balancer that uses PROXY protocol to forward connections to + // the IngressController. 
See + // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for + // information about PROXY protocol. + // + // The following values are valid for this field: + // + // * The empty string. + // * "TCP". + // * "PROXY". + // + // The empty string specifies the default, which is TCP without PROXY + // protocol. Note that the default is subject to change. + // + // +optional + Protocol IngressControllerProtocol `json:"protocol,omitempty"` + + // httpPort is the port on the host which should be used to listen for + // HTTP requests. This field should be set when port 80 is already in use. + // The value should not coincide with the NodePort range of the cluster. + // When the value is 0 or is not specified it defaults to 80. + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=80 + // +optional + HTTPPort int32 `json:"httpPort,omitempty"` + + // httpsPort is the port on the host which should be used to listen for + // HTTPS requests. This field should be set when port 443 is already in use. + // The value should not coincide with the NodePort range of the cluster. + // When the value is 0 or is not specified it defaults to 443. + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=443 + // +optional + HTTPSPort int32 `json:"httpsPort,omitempty"` + + // statsPort is the port on the host where the stats from the router are + // published. The value should not coincide with the NodePort range of the + // cluster. If an external load balancer is configured to forward connections + // to this IngressController, the load balancer should use this port for + // health checks. The load balancer can send HTTP probes on this port on a + // given node, with the path /healthz/ready to determine if the ingress + // controller is ready to receive traffic on the node. For proper operation + // the load balancer must not forward traffic to a node until the health + // check reports ready. The load balancer should also stop forwarding requests + // within a maximum of 45 seconds after /healthz/ready starts reporting + // not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with + // a threshold of two successful or failed requests to become healthy or + // unhealthy respectively, are well-tested values. When the value is 0 or + // is not specified it defaults to 1936. + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=1936 + // +optional + StatsPort int32 `json:"statsPort,omitempty"` +} + +// PrivateStrategy holds parameters for the Private endpoint publishing +// strategy. +type PrivateStrategy struct { + // protocol specifies whether the IngressController expects incoming + // connections to use plain TCP or whether the IngressController expects + // PROXY protocol. + // + // PROXY protocol can be used with load balancers that support it to + // communicate the source addresses of client connections when + // forwarding those connections to the IngressController. Using PROXY + // protocol enables the IngressController to report those source + // addresses instead of reporting the load balancer's address in HTTP + // headers and logs. Note that enabling PROXY protocol on the + // IngressController will cause connections to fail if you are not using + // a load balancer that uses PROXY protocol to forward connections to + // the IngressController. 
See + // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for + // information about PROXY protocol. + // + // The following values are valid for this field: + // + // * The empty string. + // * "TCP". + // * "PROXY". + // + // The empty string specifies the default, which is TCP without PROXY + // protocol. Note that the default is subject to change. + // + // +optional + Protocol IngressControllerProtocol `json:"protocol,omitempty"` +} + +// NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy. +type NodePortStrategy struct { + // protocol specifies whether the IngressController expects incoming + // connections to use plain TCP or whether the IngressController expects + // PROXY protocol. + // + // PROXY protocol can be used with load balancers that support it to + // communicate the source addresses of client connections when + // forwarding those connections to the IngressController. Using PROXY + // protocol enables the IngressController to report those source + // addresses instead of reporting the load balancer's address in HTTP + // headers and logs. Note that enabling PROXY protocol on the + // IngressController will cause connections to fail if you are not using + // a load balancer that uses PROXY protocol to forward connections to + // the IngressController. See + // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for + // information about PROXY protocol. + // + // The following values are valid for this field: + // + // * The empty string. + // * "TCP". + // * "PROXY". + // + // The empty string specifies the default, which is TCP without PROXY + // protocol. Note that the default is subject to change. + // + // +optional + Protocol IngressControllerProtocol `json:"protocol,omitempty"` +} + +// IngressControllerProtocol specifies whether PROXY protocol is enabled or not. +// +kubebuilder:validation:Enum="";TCP;PROXY +type IngressControllerProtocol string + +const ( + DefaultProtocol IngressControllerProtocol = "" + TCPProtocol IngressControllerProtocol = "TCP" + ProxyProtocol IngressControllerProtocol = "PROXY" +) + +// EndpointPublishingStrategy is a way to publish the endpoints of an +// IngressController, and represents the type and any additional configuration +// for a specific type. +// +union +type EndpointPublishingStrategy struct { + // type is the publishing strategy to use. Valid values are: + // + // * LoadBalancerService + // + // Publishes the ingress controller using a Kubernetes LoadBalancer Service. + // + // In this configuration, the ingress controller deployment uses container + // networking. A LoadBalancer Service is created to publish the deployment. + // + // See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + // + // If domain is set, a wildcard DNS record will be managed to point at the + // LoadBalancer Service's external name. DNS records are managed only in DNS + // zones defined by dns.config.openshift.io/cluster .spec.publicZone and + // .spec.privateZone. + // + // Wildcard DNS management is currently supported only on the AWS, Azure, + // and GCP platforms. + // + // * HostNetwork + // + // Publishes the ingress controller on node ports where the ingress controller + // is deployed. + // + // In this configuration, the ingress controller deployment uses host + // networking, bound to node ports 80 and 443. The user is responsible for + // configuring an external load balancer to publish the ingress controller via + // the node ports. 
+	//
+	// * Private
+	//
+	// Does not publish the ingress controller.
+	//
+	// In this configuration, the ingress controller deployment uses container
+	// networking, and is not explicitly published. The user must manually publish
+	// the ingress controller.
+	//
+	// * NodePortService
+	//
+	// Publishes the ingress controller using a Kubernetes NodePort Service.
+	//
+	// In this configuration, the ingress controller deployment uses container
+	// networking. A NodePort Service is created to publish the deployment. The
+	// specific node ports are dynamically allocated by OpenShift; however, to
+	// support static port allocations, user changes to the node port
+	// field of the managed NodePort Service will be preserved.
+	//
+	// +unionDiscriminator
+	// +required
+	Type EndpointPublishingStrategyType `json:"type"`
+
+	// loadBalancer holds parameters for the load balancer. Present only if
+	// type is LoadBalancerService.
+	// +optional
+	LoadBalancer *LoadBalancerStrategy `json:"loadBalancer,omitempty"`
+
+	// hostNetwork holds parameters for the HostNetwork endpoint publishing
+	// strategy. Present only if type is HostNetwork.
+	// +optional
+	HostNetwork *HostNetworkStrategy `json:"hostNetwork,omitempty"`
+
+	// private holds parameters for the Private endpoint publishing
+	// strategy. Present only if type is Private.
+	// +optional
+	Private *PrivateStrategy `json:"private,omitempty"`
+
+	// nodePort holds parameters for the NodePortService endpoint publishing strategy.
+	// Present only if type is NodePortService.
+	// +optional
+	NodePort *NodePortStrategy `json:"nodePort,omitempty"`
+}
+
+// ClientCertificatePolicy describes the policy for client certificates.
+// +kubebuilder:validation:Enum="";Required;Optional
+type ClientCertificatePolicy string
+
+const (
+	// ClientCertificatePolicyRequired indicates that a client certificate
+	// should be required.
+	ClientCertificatePolicyRequired ClientCertificatePolicy = "Required"
+
+	// ClientCertificatePolicyOptional indicates that a client certificate
+	// should be requested but not required.
+	ClientCertificatePolicyOptional ClientCertificatePolicy = "Optional"
+)
+
+// ClientTLS specifies TLS configuration to enable client-to-server
+// authentication, which can be used for mutual TLS.
+type ClientTLS struct {
+	// clientCertificatePolicy specifies whether the ingress controller
+	// requires clients to provide certificates. This field accepts the
+	// values "Required" or "Optional".
+	//
+	// Note that the ingress controller only checks client certificates for
+	// edge-terminated and reencrypt TLS routes; it cannot check
+	// certificates for cleartext HTTP or passthrough TLS routes.
+	//
+	// +required
+	ClientCertificatePolicy ClientCertificatePolicy `json:"clientCertificatePolicy"`
+
+	// clientCA specifies a configmap containing the PEM-encoded CA
+	// certificate bundle that should be used to verify a client's
+	// certificate. The administrator must create this configmap in the
+	// openshift-config namespace.
+	//
+	// +required
+	ClientCA configv1.ConfigMapNameReference `json:"clientCA"`
+
+	// allowedSubjectPatterns specifies a list of regular expressions that
+	// should be matched against the distinguished name on a valid client
+	// certificate to filter requests. The regular expressions must use
+	// PCRE syntax. If this list is empty, no filtering is performed.
If + // the list is nonempty, then at least one pattern must match a client + // certificate's distinguished name or else the ingress controller + // rejects the certificate and denies the connection. + // + // +listType=atomic + // +optional + AllowedSubjectPatterns []string `json:"allowedSubjectPatterns,omitempty"` +} + +// RouteAdmissionPolicy is an admission policy for allowing new route claims. +type RouteAdmissionPolicy struct { + // namespaceOwnership describes how host name claims across namespaces should + // be handled. + // + // Value must be one of: + // + // - Strict: Do not allow routes in different namespaces to claim the same host. + // + // - InterNamespaceAllowed: Allow routes to claim different paths of the same + // host name across namespaces. + // + // If empty, the default is Strict. + // +optional + NamespaceOwnership NamespaceOwnershipCheck `json:"namespaceOwnership,omitempty"` + // wildcardPolicy describes how routes with wildcard policies should + // be handled for the ingress controller. WildcardPolicy controls use + // of routes [1] exposed by the ingress controller based on the route's + // wildcard policy. + // + // [1] https://github.com/openshift/api/blob/master/route/v1/types.go + // + // Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed + // will cause admitted routes with a wildcard policy of Subdomain to stop + // working. These routes must be updated to a wildcard policy of None to be + // readmitted by the ingress controller. + // + // WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values. + // + // If empty, defaults to "WildcardsDisallowed". + // + WildcardPolicy WildcardPolicy `json:"wildcardPolicy,omitempty"` +} + +// WildcardPolicy is a route admission policy component that describes how +// routes with a wildcard policy should be handled. +// +kubebuilder:validation:Enum=WildcardsAllowed;WildcardsDisallowed +type WildcardPolicy string + +const ( + // WildcardPolicyAllowed indicates routes with any wildcard policy are + // admitted by the ingress controller. + WildcardPolicyAllowed WildcardPolicy = "WildcardsAllowed" + + // WildcardPolicyDisallowed indicates only routes with a wildcard policy + // of None are admitted by the ingress controller. + WildcardPolicyDisallowed WildcardPolicy = "WildcardsDisallowed" +) + +// NamespaceOwnershipCheck is a route admission policy component that describes +// how host name claims across namespaces should be handled. +// +kubebuilder:validation:Enum=InterNamespaceAllowed;Strict +type NamespaceOwnershipCheck string + +const ( + // InterNamespaceAllowedOwnershipCheck allows routes to claim different paths of the same host name across namespaces. + InterNamespaceAllowedOwnershipCheck NamespaceOwnershipCheck = "InterNamespaceAllowed" + + // StrictNamespaceOwnershipCheck does not allow routes to claim the same host name across namespaces. + StrictNamespaceOwnershipCheck NamespaceOwnershipCheck = "Strict" +) + +// LoggingDestinationType is a type of destination to which to send log +// messages. +// +// +kubebuilder:validation:Enum=Container;Syslog +type LoggingDestinationType string + +const ( + // Container sends log messages to a sidecar container. + ContainerLoggingDestinationType LoggingDestinationType = "Container" + + // Syslog sends log messages to a syslog endpoint. 
+ SyslogLoggingDestinationType LoggingDestinationType = "Syslog" + + // ContainerLoggingSidecarContainerName is the name of the container + // with the log output in an ingress controller pod when container + // logging is used. + ContainerLoggingSidecarContainerName = "logs" +) + +// SyslogLoggingDestinationParameters describes parameters for the Syslog +// logging destination type. +type SyslogLoggingDestinationParameters struct { + // address is the IP address of the syslog endpoint that receives log + // messages. + // + // +required + Address string `json:"address"` + + // port is the UDP port number of the syslog endpoint that receives log + // messages. + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +required + Port uint32 `json:"port"` + + // facility specifies the syslog facility of log messages. + // + // If this field is empty, the facility is "local1". + // + // +kubebuilder:validation:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;cron;auth2;ftp;ntp;audit;alert;cron2;local0;local1;local2;local3;local4;local5;local6;local7 + // +optional + Facility string `json:"facility,omitempty"` + + // maxLength is the maximum length of the log message. + // + // Valid values are integers in the range 480 to 4096, inclusive. + // + // When omitted, the default value is 1024. + // + // +kubebuilder:validation:Maximum=4096 + // +kubebuilder:validation:Minimum=480 + // +kubebuilder:default=1024 + // +default:=1024 + // +optional + MaxLength uint32 `json:"maxLength,omitempty"` +} + +// ContainerLoggingDestinationParameters describes parameters for the Container +// logging destination type. +type ContainerLoggingDestinationParameters struct { + // maxLength is the maximum length of the log message. + // + // Valid values are integers in the range 480 to 8192, inclusive. + // + // When omitted, the default value is 1024. + // + // +kubebuilder:validation:Maximum=8192 + // +kubebuilder:validation:Minimum=480 + // +kubebuilder:default=1024 + // +default:=1024 + // +optional + MaxLength int32 `json:"maxLength,omitempty"` +} + +// LoggingDestination describes a destination for log messages. +// +union +type LoggingDestination struct { + // type is the type of destination for logs. It must be one of the + // following: + // + // * Container + // + // The ingress operator configures the sidecar container named "logs" on + // the ingress controller pod and configures the ingress controller to + // write logs to the sidecar. The logs are then available as container + // logs. The expectation is that the administrator configures a custom + // logging solution that reads logs from this sidecar. Note that using + // container logs means that logs may be dropped if the rate of logs + // exceeds the container runtime's or the custom logging solution's + // capacity. + // + // * Syslog + // + // Logs are sent to a syslog endpoint. The administrator must specify + // an endpoint that can receive syslog messages. The expectation is + // that the administrator has configured a custom syslog instance. + // + // +unionDiscriminator + // +required + Type LoggingDestinationType `json:"type"` + + // syslog holds parameters for a syslog endpoint. Present only if + // type is Syslog. + // + // +optional + Syslog *SyslogLoggingDestinationParameters `json:"syslog,omitempty"` + + // container holds parameters for the Container logging destination. + // Present only if type is Container. 
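+	//
+	// For example, a Syslog destination pointing at a collector (a minimal
+	// sketch; address, port, and facility are illustrative):
+	//
+	//	dest := LoggingDestination{
+	//		Type: SyslogLoggingDestinationType,
+	//		Syslog: &SyslogLoggingDestinationParameters{
+	//			Address:  "10.0.0.10",
+	//			Port:     514,
+	//			Facility: "local1",
+	//		},
+	//	}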
+ // + // +optional + Container *ContainerLoggingDestinationParameters `json:"container,omitempty"` +} + +// IngressControllerCaptureHTTPHeader describes an HTTP header that should be +// captured. +type IngressControllerCaptureHTTPHeader struct { + // name specifies a header name. Its value must be a valid HTTP header + // name as defined in RFC 2616 section 4.2. + // + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +required + Name string `json:"name"` + + // maxLength specifies a maximum length for the header value. If a + // header value exceeds this length, the value will be truncated in the + // log message. Note that the ingress controller may impose a separate + // bound on the total length of HTTP headers in a request. + // + // +kubebuilder:validation:Minimum=1 + // +required + MaxLength int `json:"maxLength"` +} + +// IngressControllerCaptureHTTPHeaders specifies which HTTP headers the +// IngressController captures. +type IngressControllerCaptureHTTPHeaders struct { + // request specifies which HTTP request headers to capture. + // + // If this field is empty, no request headers are captured. + // + // +nullable + // +optional + // +listType=atomic + Request []IngressControllerCaptureHTTPHeader `json:"request,omitempty"` + + // response specifies which HTTP response headers to capture. + // + // If this field is empty, no response headers are captured. + // + // +nullable + // +optional + // +listType=atomic + Response []IngressControllerCaptureHTTPHeader `json:"response,omitempty"` +} + +// CookieMatchType indicates the type of matching used against cookie names to +// select a cookie for capture. +// +kubebuilder:validation:Enum=Exact;Prefix +type CookieMatchType string + +const ( + // CookieMatchTypeExact indicates that an exact string match should be + // performed. + CookieMatchTypeExact CookieMatchType = "Exact" + // CookieMatchTypePrefix indicates that a string prefix match should be + // performed. + CookieMatchTypePrefix CookieMatchType = "Prefix" +) + +// IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be +// captured. +type IngressControllerCaptureHTTPCookie struct { + IngressControllerCaptureHTTPCookieUnion `json:",inline"` + + // maxLength specifies a maximum length of the string that will be + // logged, which includes the cookie name, cookie value, and + // one-character delimiter. If the log entry exceeds this length, the + // value will be truncated in the log message. Note that the ingress + // controller may impose a separate bound on the total length of HTTP + // headers in a request. + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=1024 + // +required + MaxLength int `json:"maxLength"` +} + +// IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured. +// +union +type IngressControllerCaptureHTTPCookieUnion struct { + // matchType specifies the type of match to be performed on the cookie + // name. Allowed values are "Exact" for an exact string match and + // "Prefix" for a string prefix match. If "Exact" is specified, a name + // must be specified in the name field. If "Prefix" is provided, a + // prefix must be specified in the namePrefix field. For example, + // specifying matchType "Prefix" and namePrefix "foo" will capture a + // cookie named "foo" or "foobar" but not one named "bar". The first + // matching cookie is captured. 
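+	//
+	// For example, capturing any cookie whose name begins with "session",
+	// truncated to 128 bytes (a minimal sketch; names and sizes are
+	// illustrative):
+	//
+	//	cookie := IngressControllerCaptureHTTPCookie{
+	//		IngressControllerCaptureHTTPCookieUnion: IngressControllerCaptureHTTPCookieUnion{
+	//			MatchType:  CookieMatchTypePrefix,
+	//			NamePrefix: "session",
+	//		},
+	//		MaxLength: 128,
+	//	}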
+ // + // +unionDiscriminator + // +required + MatchType CookieMatchType `json:"matchType"` + + // name specifies a cookie name. Its value must be a valid HTTP cookie + // name as defined in RFC 6265 section 4.1. + // + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + // +optional + Name string `json:"name"` + + // namePrefix specifies a cookie name prefix. Its value must be a valid + // HTTP cookie name as defined in RFC 6265 section 4.1. + // + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + // +optional + NamePrefix string `json:"namePrefix"` +} + +// LoggingPolicy indicates how an event should be logged. +// +kubebuilder:validation:Enum=Log;Ignore +type LoggingPolicy string + +const ( + // LoggingPolicyLog indicates that an event should be logged. + LoggingPolicyLog LoggingPolicy = "Log" + // LoggingPolicyIgnore indicates that an event should not be logged. + LoggingPolicyIgnore LoggingPolicy = "Ignore" +) + +// AccessLogging describes how client requests should be logged. +type AccessLogging struct { + // destination is where access logs go. + // + // +required + Destination LoggingDestination `json:"destination"` + + // httpLogFormat specifies the format of the log message for an HTTP + // request. + // + // If this field is empty, log messages use the implementation's default + // HTTP log format. For HAProxy's default HTTP log format, see the + // HAProxy documentation: + // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + // + // Note that this format only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). It does not affect the log format for TLS passthrough + // connections. + // + // +optional + HttpLogFormat string `json:"httpLogFormat,omitempty"` + + // httpCaptureHeaders defines HTTP headers that should be captured in + // access logs. If this field is empty, no headers are captured. + // + // Note that this option only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). Headers cannot be captured for TLS passthrough + // connections. + // + // +optional + HTTPCaptureHeaders IngressControllerCaptureHTTPHeaders `json:"httpCaptureHeaders,omitempty"` + + // httpCaptureCookies specifies HTTP cookies that should be captured in + // access logs. If this field is empty, no cookies are captured. + // + // +nullable + // +optional + // +kubebuilder:validation:MaxItems=1 + // +listType=atomic + HTTPCaptureCookies []IngressControllerCaptureHTTPCookie `json:"httpCaptureCookies,omitempty"` + + // logEmptyRequests specifies how connections on which no request is + // received should be logged. Typically, these empty requests come from + // load balancers' health probes or Web browsers' speculative + // connections ("preconnect"), in which case logging these requests may + // be undesirable. However, these requests may also be caused by + // network errors, in which case logging empty requests may be useful + // for diagnosing the errors. In addition, these requests may be caused + // by port scans, in which case logging empty requests may aid in + // detecting intrusion attempts. 
Allowed values for this field are + // "Log" and "Ignore". The default value is "Log". + // + // +optional + // +kubebuilder:default:="Log" + LogEmptyRequests LoggingPolicy `json:"logEmptyRequests,omitempty"` +} + +// IngressControllerLogging describes what should be logged where. +type IngressControllerLogging struct { + // access describes how the client requests should be logged. + // + // If this field is empty, access logging is disabled. + // + // +optional + Access *AccessLogging `json:"access,omitempty"` +} + +// IngressControllerHTTPHeaderPolicy is a policy for setting HTTP headers. +// +// +kubebuilder:validation:Enum=Append;Replace;IfNone;Never +type IngressControllerHTTPHeaderPolicy string + +const ( + // AppendHTTPHeaderPolicy appends the header, preserving any existing header. + AppendHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Append" + // ReplaceHTTPHeaderPolicy sets the header, removing any existing header. + ReplaceHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Replace" + // IfNoneHTTPHeaderPolicy sets the header if it is not already set. + IfNoneHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "IfNone" + // NeverHTTPHeaderPolicy never sets the header, preserving any existing + // header. + NeverHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Never" +) + +// IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a +// unique id header. +type IngressControllerHTTPUniqueIdHeaderPolicy struct { + // name specifies the name of the HTTP header (for example, "unique-id") + // that the ingress controller should inject into HTTP requests. The + // field's value must be a valid HTTP header name as defined in RFC 2616 + // section 4.2. If the field is empty, no header is injected. + // + // +optional + // +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + Name string `json:"name,omitempty"` + + // format specifies the format for the injected HTTP header's value. + // This field has no effect unless name is specified. For the + // HAProxy-based ingress controller implementation, this format uses the + // same syntax as the HTTP log format. If the field is empty, the + // default value is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the + // corresponding HAProxy documentation: + // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + // + // +optional + // +kubebuilder:validation:Pattern="^(%(%|(\\{[-+]?[QXE](,[-+]?[QXE])*\\})?([A-Za-z]+|\\[[.0-9A-Z_a-z]+(\\([^)]+\\))?(,[.0-9A-Z_a-z]+(\\([^)]+\\))?)*\\]))|[^%[:cntrl:]])*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + Format string `json:"format,omitempty"` +} + +// IngressControllerHTTPHeaderNameCaseAdjustment is the name of an HTTP header +// (for example, "X-Forwarded-For") in the desired capitalization. The value +// must be a valid HTTP header name as defined in RFC 2616 section 4.2. +// +// +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" +// +kubebuilder:validation:MinLength=0 +// +kubebuilder:validation:MaxLength=1024 +type IngressControllerHTTPHeaderNameCaseAdjustment string + +// IngressControllerHTTPHeaders specifies how the IngressController handles +// certain HTTP headers. 
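+// For example, the fields below can be combined to append the standard
+// forwarding headers, inject a unique-id header, and normalize one header's
+// capitalization (a minimal sketch; the header names are illustrative):
+//
+//	headers := IngressControllerHTTPHeaders{
+//		ForwardedHeaderPolicy: AppendHTTPHeaderPolicy,
+//		UniqueId: IngressControllerHTTPUniqueIdHeaderPolicy{
+//			Name: "unique-id",
+//		},
+//		HeaderNameCaseAdjustments: []IngressControllerHTTPHeaderNameCaseAdjustment{
+//			"X-Forwarded-For",
+//		},
+//	}
+//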
+type IngressControllerHTTPHeaders struct { + // forwardedHeaderPolicy specifies when and how the IngressController + // sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, + // X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version + // HTTP headers. The value may be one of the following: + // + // * "Append", which specifies that the IngressController appends the + // headers, preserving existing headers. + // + // * "Replace", which specifies that the IngressController sets the + // headers, replacing any existing Forwarded or X-Forwarded-* headers. + // + // * "IfNone", which specifies that the IngressController sets the + // headers if they are not already set. + // + // * "Never", which specifies that the IngressController never sets the + // headers, preserving any existing headers. + // + // By default, the policy is "Append". + // + // +optional + ForwardedHeaderPolicy IngressControllerHTTPHeaderPolicy `json:"forwardedHeaderPolicy,omitempty"` + + // uniqueId describes configuration for a custom HTTP header that the + // ingress controller should inject into incoming HTTP requests. + // Typically, this header is configured to have a value that is unique + // to the HTTP request. The header can be used by applications or + // included in access logs to facilitate tracing individual HTTP + // requests. + // + // If this field is empty, no such header is injected into requests. + // + // +optional + UniqueId IngressControllerHTTPUniqueIdHeaderPolicy `json:"uniqueId,omitempty"` + + // headerNameCaseAdjustments specifies case adjustments that can be + // applied to HTTP header names. Each adjustment is specified as an + // HTTP header name with the desired capitalization. For example, + // specifying "X-Forwarded-For" indicates that the "x-forwarded-for" + // HTTP header should be adjusted to have the specified capitalization. + // + // These adjustments are only applied to cleartext, edge-terminated, and + // re-encrypt routes, and only when using HTTP/1. + // + // For request headers, these adjustments are applied only for routes + // that have the haproxy.router.openshift.io/h1-adjust-case=true + // annotation. For response headers, these adjustments are applied to + // all HTTP responses. + // + // If this field is empty, no request headers are adjusted. + // + // +nullable + // +optional + // +listType=atomic + HeaderNameCaseAdjustments []IngressControllerHTTPHeaderNameCaseAdjustment `json:"headerNameCaseAdjustments,omitempty"` + + // actions specifies options for modifying headers and their values. + // Note that this option only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). Headers cannot be modified for TLS passthrough + // connections. + // Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` + // may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in + // accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. + // Any actions defined here are applied after any actions related to the following other fields: + // cache-control, spec.clientTLS, + // spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, + // and spec.httpHeaders.headerNameCaseAdjustments. 
+	// In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after
+	// the actions specified in the IngressController's spec.httpHeaders.actions field.
+	// In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be
+	// executed after the actions specified in the Route's spec.httpHeaders.actions field.
+	// Headers set using this API cannot be captured for use in access logs.
+	// The following header names are reserved and may not be modified via this API:
+	// Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie.
+	// Note that the total size of all net added headers *after* interpolating dynamic values
+	// must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the
+	// IngressController. Please refer to the documentation
+	// for that API field for more details.
+	// +optional
+	Actions IngressControllerHTTPHeaderActions `json:"actions,omitempty"`
+}
+
+// IngressControllerHTTPHeaderActions defines configuration for actions on HTTP request and response headers.
+type IngressControllerHTTPHeaderActions struct {
+	// response is a list of HTTP response headers to modify.
+	// Actions defined here will modify the response headers of all requests passing through an ingress controller.
+	// These actions are applied to all Routes, i.e. for all connections handled by the ingress controller defined within a cluster.
+	// IngressController actions for response headers will be executed after Route actions.
+	// Currently, actions may either `Set` or `Delete` header values.
+	// Actions are applied in sequence as defined in this list.
+	// A maximum of 20 response header actions may be configured.
+	// Sample fetchers allowed are "res.hdr" and "ssl_c_der".
+	// Converters allowed are "lower" and "base64".
+	// Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	// +kubebuilder:validation:MaxItems=20
+	// +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64."
+	Response []IngressControllerHTTPHeader `json:"response"`
+	// request is a list of HTTP request headers to modify.
+	// Actions defined here will modify the request headers of all requests passing through an ingress controller.
+	// These actions are applied to all Routes, i.e. for all connections handled by the ingress controller defined within a cluster.
+	// IngressController actions for request headers will be executed before Route actions.
+	// Currently, actions may either `Set` or `Delete` header values.
+	// Actions are applied in sequence as defined in this list.
+	// A maximum of 20 request header actions may be configured.
+ // Sample fetchers allowed are "req.hdr" and "ssl_c_der". + // Converters allowed are "lower" and "base64". + // Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". + // + --- + // + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa. + // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=20 + // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." + Request []IngressControllerHTTPHeader `json:"request"` +} + +// IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header. +type IngressControllerHTTPHeader struct { + // name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header + // name as defined in RFC 2616 section 4.2. + // The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`". + // The following header names are reserved and may not be modified via this API: + // Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. + // It must be no more than 255 characters in length. + // Header name must be unique. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'strict-transport-security'",message="strict-transport-security header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'proxy'",message="proxy header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'host'",message="host header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'cookie'",message="cookie header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'set-cookie'",message="set-cookie header may not be modified via header actions" + Name string `json:"name"` + // action specifies actions to perform on headers, such as setting or deleting headers. + // +required + Action IngressControllerHTTPHeaderActionUnion `json:"action"` +} + +// IngressControllerHTTPHeaderActionUnion specifies an action to take on an HTTP header. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Set' ? has(self.set) : !has(self.set)",message="set is required when type is Set, and forbidden otherwise" +// +union +type IngressControllerHTTPHeaderActionUnion struct { + // type defines the type of the action to be applied on the header. 
+	// Possible values are Set or Delete.
+	// Set allows you to set HTTP request and response headers.
+	// Delete allows you to delete HTTP request and response headers.
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:=Set;Delete
+	// +required
+	Type IngressControllerHTTPHeaderActionType `json:"type"`
+
+	// set specifies how the HTTP header should be set.
+	// This field is required when type is Set and forbidden otherwise.
+	// +optional
+	// +unionMember
+	Set *IngressControllerSetHTTPHeader `json:"set,omitempty"`
+}
+
+// IngressControllerHTTPHeaderActionType defines actions that can be performed on HTTP headers.
+type IngressControllerHTTPHeaderActionType string
+
+const (
+	// Set specifies that an HTTP header should be set.
+	Set IngressControllerHTTPHeaderActionType = "Set"
+	// Delete specifies that an HTTP header should be deleted.
+	Delete IngressControllerHTTPHeaderActionType = "Delete"
+)
+
+// IngressControllerSetHTTPHeader defines the value which needs to be set on an HTTP header.
+type IngressControllerSetHTTPHeader struct {
+	// value specifies a header value.
+	// Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in
+	// http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and
+	// otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.
+	// The value of this field must be no more than 16384 characters in length.
+	// Note that the total size of all net added headers *after* interpolating dynamic values
+	// must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the
+	// IngressController.
+	// + ---
+	// + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit.
+	// + See https://stackoverflow.com/a/686243.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=16384
+	Value string `json:"value"`
+}
+
+// IngressControllerTuningOptions specifies options for tuning the performance
+// of ingress controller pods
+type IngressControllerTuningOptions struct {
+	// headerBufferBytes describes how much memory should be reserved
+	// (in bytes) for IngressController connection sessions.
+	// Note that this value must be at least 16384 if HTTP/2 is
+	// enabled for the IngressController (https://tools.ietf.org/html/rfc7540).
+	// If this field is empty, the IngressController will use a default value
+	// of 32768 bytes.
+	//
+	// Setting this field is generally not recommended as headerBufferBytes
+	// values that are too small may break the IngressController and
+	// headerBufferBytes values that are too large could cause the
+	// IngressController to use significantly more memory than necessary.
+	//
+	// +kubebuilder:validation:Minimum=16384
+	// +optional
+	HeaderBufferBytes int32 `json:"headerBufferBytes,omitempty"`
+
+	// headerBufferMaxRewriteBytes describes how much memory should be reserved
+	// (in bytes) from headerBufferBytes for HTTP header rewriting
+	// and appending for IngressController connection sessions.
+	// Note that incoming HTTP requests will be limited to
+	// (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning
+	// headerBufferBytes must be greater than headerBufferMaxRewriteBytes.
+	// If this field is empty, the IngressController will use a default value
+	// of 8192 bytes.
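+	//
+	// With both defaults in place, the arithmetic works out as follows (a
+	// worked example using the documented default values, in bytes):
+	//
+	//	headerBufferBytes           = 32768
+	//	headerBufferMaxRewriteBytes =  8192
+	//	// incoming requests are limited to 32768 - 8192 = 24576 bytes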
+ // + // Setting this field is generally not recommended as + // headerBufferMaxRewriteBytes values that are too small may break the + // IngressController and headerBufferMaxRewriteBytes values that are too + // large could cause the IngressController to use significantly more memory + // than necessary. + // + // +kubebuilder:validation:Minimum=4096 + // +optional + HeaderBufferMaxRewriteBytes int32 `json:"headerBufferMaxRewriteBytes,omitempty"` + + // threadCount defines the number of threads created per HAProxy process. + // Creating more threads allows each ingress controller pod to handle more + // connections, at the cost of more system resources being used. HAProxy + // currently supports up to 64 threads. If this field is empty, the + // IngressController will use the default value. The current default is 4 + // threads, but this may change in future releases. + // + // Setting this field is generally not recommended. Increasing the number + // of HAProxy threads allows ingress controller pods to utilize more CPU + // time under load, potentially starving other pods if set too high. + // Reducing the number of threads may cause the ingress controller to + // perform poorly. + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=64 + // +optional + ThreadCount int32 `json:"threadCount,omitempty"` + + // clientTimeout defines how long a connection will be held open while + // waiting for a client response. + // + // If unset, the default timeout is 30s + // +kubebuilder:validation:Format=duration + // +optional + ClientTimeout *metav1.Duration `json:"clientTimeout,omitempty"` + + // clientFinTimeout defines how long a connection will be held open while + // waiting for the client response to the server/backend closing the + // connection. + // + // If unset, the default timeout is 1s + // +kubebuilder:validation:Format=duration + // +optional + ClientFinTimeout *metav1.Duration `json:"clientFinTimeout,omitempty"` + + // serverTimeout defines how long a connection will be held open while + // waiting for a server/backend response. + // + // If unset, the default timeout is 30s + // +kubebuilder:validation:Format=duration + // +optional + ServerTimeout *metav1.Duration `json:"serverTimeout,omitempty"` + + // serverFinTimeout defines how long a connection will be held open while + // waiting for the server/backend response to the client closing the + // connection. + // + // If unset, the default timeout is 1s + // +kubebuilder:validation:Format=duration + // +optional + ServerFinTimeout *metav1.Duration `json:"serverFinTimeout,omitempty"` + + // tunnelTimeout defines how long a tunnel connection (including + // websockets) will be held open while the tunnel is idle. + // + // If unset, the default timeout is 1h + // +kubebuilder:validation:Format=duration + // +optional + TunnelTimeout *metav1.Duration `json:"tunnelTimeout,omitempty"` + + // connectTimeout defines the maximum time to wait for + // a connection attempt to a server/backend to succeed. + // + // This field expects an unsigned duration string of decimal numbers, each with optional + // fraction and a unit suffix, e.g. "300ms", "1.5h" or "2h45m". + // Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h". + // + // When omitted, this means the user has no opinion and the platform is left + // to choose a reasonable default. This default is subject to change over time. + // The current default is 5s. 
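+	// A minimal sketch of setting this field from Go (illustrative only; it
+	// assumes the standard "time" package is imported alongside metav1):
+	//
+	//	tuning := IngressControllerTuningOptions{
+	//		ConnectTimeout: &metav1.Duration{Duration: 10 * time.Second},
+	//	}
+	//
+	// The serialized form of the above is "10s", which satisfies the pattern below.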
+	//
+	// +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$
+	// +kubebuilder:validation:Type:=string
+	// +optional
+	ConnectTimeout *metav1.Duration `json:"connectTimeout,omitempty"`
+
+	// tlsInspectDelay defines how long the router can hold data to find a
+	// matching route.
+	//
+	// Setting this too short can cause the router to fall back to the default
+	// certificate for edge-terminated or reencrypt routes even when a better
+	// matching certificate could be used.
+	//
+	// If unset, the default inspect delay is 5s
+	// +kubebuilder:validation:Format=duration
+	// +optional
+	TLSInspectDelay *metav1.Duration `json:"tlsInspectDelay,omitempty"`
+
+	// healthCheckInterval defines how long the router waits between two consecutive
+	// health checks on its configured backends. This value is applied globally as
+	// a default for all routes, but may be overridden per-route by the route annotation
+	// "router.openshift.io/haproxy.health.check.interval".
+	//
+	// Expects an unsigned duration string of decimal numbers, each with optional
+	// fraction and a unit suffix, e.g. "300ms", "1.5h" or "2h45m".
+	// Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h".
+	//
+	// Setting this to less than 5s can cause excess traffic due to too frequent
+	// TCP health checks and accompanying SYN packet storms. Alternatively, setting
+	// this too high can result in increased latency due to backend servers that are
+	// no longer available but have not yet been detected as such.
+	//
+	// An empty or zero healthCheckInterval means no opinion and the IngressController
+	// chooses a default, which is subject to change over time.
+	// Currently the default healthCheckInterval value is 5s.
+	//
+	// Currently the minimum allowed value is 1s and the maximum allowed value is
+	// 2147483647ms (24.85 days). Both are subject to change over time.
+	//
+	// +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$
+	// +kubebuilder:validation:Type:=string
+	// +optional
+	HealthCheckInterval *metav1.Duration `json:"healthCheckInterval,omitempty"`
+
+	// maxConnections defines the maximum number of simultaneous
+	// connections that can be established per HAProxy process.
+	// Increasing this value allows each ingress controller pod to
+	// handle more connections but at the cost of additional
+	// system resources being consumed.
+	//
+	// Permitted values are: empty, 0, -1, and the range
+	// 2000-2000000.
+	//
+	// If this field is empty or 0, the IngressController will use
+	// the default value of 50000, but the default is subject to
+	// change in future releases.
+	//
+	// If the value is -1 then HAProxy will dynamically compute a
+	// maximum value based on the available ulimits in the running
+	// container. Selecting -1 (i.e., auto) will result in a large
+	// value being computed (~520000 on OpenShift >=4.10 clusters)
+	// and therefore each HAProxy process will incur significant
+	// memory usage compared to the current default of 50000.
+	//
+	// Setting a value that is greater than the current operating
+	// system limit will prevent the HAProxy process from
+	// starting.
+	//
+	// If you choose a discrete value (e.g., 750000) and the
+	// router pod is migrated to a new node, there's no guarantee
+	// that the new node has identical ulimits configured. In
+	// such a scenario the pod would fail to start.
+	// If you have nodes with different ulimits configured (e.g.,
+	// different tuned profiles), the guidance is to use -1 and let
+	// the value be computed dynamically at runtime rather than
+	// choosing a discrete value.
+	//
+	// You can monitor memory usage for router containers with the
+	// following metric:
+	// 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}'.
+	//
+	// You can monitor memory usage of individual HAProxy
+	// processes in router containers with the following metric:
+	// 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}/container_processes{container="router",namespace="openshift-ingress"}'.
+	//
+	// +optional
+	MaxConnections int32 `json:"maxConnections,omitempty"`
+
+	// reloadInterval defines the minimum interval at which the router is allowed to reload
+	// to accept new changes. Increasing this value can prevent the accumulation of
+	// HAProxy processes, depending on the scenario. Increasing this interval can
+	// also lessen load imbalance on a backend's servers when using the roundrobin
+	// balancing algorithm. Alternatively, decreasing this value may decrease latency
+	// since updates to HAProxy's configuration can take effect more quickly.
+	//
+	// The value must be a time duration value; see .
+	// Currently, the minimum value allowed is 1s, and the maximum allowed value is
+	// 120s. Minimum and maximum allowed values may change in future versions of OpenShift.
+	// Note that if a duration outside of these bounds is provided, the value of reloadInterval
+	// will be capped/floored and not rejected (e.g. a duration of over 120s will be capped to
+	// 120s; the IngressController will not reject and replace this disallowed value with
+	// the default).
+	//
+	// A zero value for reloadInterval tells the IngressController to choose the default,
+	// which is currently 5s and subject to change without notice.
+	//
+	// This field expects an unsigned duration string of decimal numbers, each with optional
+	// fraction and a unit suffix, e.g. "300ms", "1.5h" or "2h45m".
+	// Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h".
+	//
+	// Note: Setting a value significantly larger than the default of 5s can cause latency
+	// in observing updates to routes and their endpoints. HAProxy's configuration will
+	// be reloaded less frequently, and newly created routes will not be served until the
+	// subsequent reload.
+	//
+	// +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$
+	// +kubebuilder:validation:Type:=string
+	// +optional
+	ReloadInterval metav1.Duration `json:"reloadInterval,omitempty"`
+}
+
+// HTTPEmptyRequestsPolicy indicates how HTTP connections for which no request
+// is received should be handled.
+// +kubebuilder:validation:Enum=Respond;Ignore
+type HTTPEmptyRequestsPolicy string
+
+const (
+	// HTTPEmptyRequestsPolicyRespond indicates that the ingress controller
+	// should respond to empty requests.
+	HTTPEmptyRequestsPolicyRespond HTTPEmptyRequestsPolicy = "Respond"
+	// HTTPEmptyRequestsPolicyIgnore indicates that the ingress controller
+	// should ignore empty requests.
+	HTTPEmptyRequestsPolicyIgnore HTTPEmptyRequestsPolicy = "Ignore"
+)
+
+var (
+	// Available indicates the ingress controller deployment is available.
+	IngressControllerAvailableConditionType = "Available"
+	// LoadBalancerManaged indicates the management status of any load balancer
+	// service associated with an ingress controller.
+	LoadBalancerManagedIngressConditionType = "LoadBalancerManaged"
+	// LoadBalancerReady indicates the ready state of any load balancer service
+	// associated with an ingress controller.
+	LoadBalancerReadyIngressConditionType = "LoadBalancerReady"
+	// DNSManaged indicates the management status of any DNS records for the
+	// ingress controller.
+	DNSManagedIngressConditionType = "DNSManaged"
+	// DNSReady indicates the ready state of any DNS records for the ingress
+	// controller.
+	DNSReadyIngressConditionType = "DNSReady"
+)
+
+// IngressControllerStatus defines the observed status of the IngressController.
+type IngressControllerStatus struct {
+	// availableReplicas is the number of observed available replicas according to the
+	// ingress controller deployment.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas"`
+
+	// selector is a label selector, in string format, for ingress controller pods
+	// corresponding to the IngressController. The number of matching pods should
+	// equal the value of availableReplicas.
+	// +optional
+	Selector string `json:"selector"`
+
+	// domain is the actual domain in use.
+	// +optional
+	Domain string `json:"domain"`
+
+	// endpointPublishingStrategy is the actual strategy in use.
+	// +optional
+	EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"`
+
+	// conditions is a list of conditions and their status.
+	//
+	// Available means the ingress controller deployment is available and
+	// servicing route and ingress resources (i.e., .status.availableReplicas
+	// equals .spec.replicas)
+	//
+	// There are additional conditions which indicate the status of other
+	// ingress controller features and capabilities.
+	//
+	//   * LoadBalancerManaged
+	//   - True if the following conditions are met:
+	//     * The endpoint publishing strategy requires a service load balancer.
+	//   - False if any of those conditions are unsatisfied.
+	//
+	//   * LoadBalancerReady
+	//   - True if the following conditions are met:
+	//     * A load balancer is managed.
+	//     * The load balancer is ready.
+	//   - False if any of those conditions are unsatisfied.
+	//
+	//   * DNSManaged
+	//   - True if the following conditions are met:
+	//     * The endpoint publishing strategy and platform support DNS.
+	//     * The ingress controller domain is set.
+	//     * dns.config.openshift.io/cluster configures DNS zones.
+	//   - False if any of those conditions are unsatisfied.
+	//
+	//   * DNSReady
+	//   - True if the following conditions are met:
+	//     * DNS is managed.
+	//     * DNS records have been successfully created.
+	//   - False if any of those conditions are unsatisfied.
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []OperatorCondition `json:"conditions,omitempty"`
+
+	// tlsProfile is the TLS connection configuration that is in effect.
+	// +optional
+	TLSProfile *configv1.TLSProfileSpec `json:"tlsProfile,omitempty"`
+
+	// observedGeneration is the most recent generation observed.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// namespaceSelector is the actual namespaceSelector in use.
+	// +optional
+	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
+
+	// routeSelector is the actual routeSelector in use.
+	// +optional
+	RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IngressControllerList contains a list of IngressControllers.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type IngressControllerList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []IngressController `json:"items"`
+}
+
+// IngressControllerConnectionTerminationPolicy defines the behaviour
+// for handling idle connections during a soft reload of the router.
+//
+// +kubebuilder:validation:Enum=Immediate;Deferred
+type IngressControllerConnectionTerminationPolicy string
+
+const (
+	// IngressControllerConnectionTerminationPolicyImmediate specifies
+	// that idle connections should be closed immediately during a
+	// router reload.
+	IngressControllerConnectionTerminationPolicyImmediate IngressControllerConnectionTerminationPolicy = "Immediate"
+
+	// IngressControllerConnectionTerminationPolicyDeferred
+	// specifies that idle connections should remain open until a
+	// terminating event, such as a new request, the expiration of
+	// the proxy keep-alive timeout, or the client closing the
+	// connection.
+	IngressControllerConnectionTerminationPolicyDeferred IngressControllerConnectionTerminationPolicy = "Deferred"
+)
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_insights.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_insights.go
new file mode 100644
index 000000000..ed59bb438
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_insights.go
@@ -0,0 +1,156 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=insightsoperators,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1237
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=insights,operatorOrdering=00
+//
+// InsightsOperator holds cluster-wide information about the Insights Operator.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type InsightsOperator struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec is the specification of the desired behavior of the Insights Operator.
+	// +required
+	Spec InsightsOperatorSpec `json:"spec"`
+
+	// status is the most recently observed status of the Insights operator.
+	// +optional
+	Status InsightsOperatorStatus `json:"status"`
+}
+
+type InsightsOperatorSpec struct {
+	OperatorSpec `json:",inline"`
+}
+
+type InsightsOperatorStatus struct {
+	OperatorStatus `json:",inline"`
+	// gatherStatus provides basic information about the last Insights data gathering.
+	// When omitted, this means no data gathering has taken place yet.
+	// +optional
+	GatherStatus GatherStatus `json:"gatherStatus,omitempty"`
+	// insightsReport provides general Insights analysis results.
+	// When omitted, this means no data gathering has taken place yet.
+	// +optional
+	InsightsReport InsightsReport `json:"insightsReport,omitempty"`
+}
+
+// gatherStatus provides information about the last known gather event.
+type GatherStatus struct {
+	// lastGatherTime is the last time when Insights data gathering finished.
+	// An empty value means that no data has been gathered yet.
+	// +optional
+	LastGatherTime metav1.Time `json:"lastGatherTime,omitempty"`
+	// lastGatherDuration is the total time taken to process
+	// all gatherers during the last gather event.
+	// +optional
+	// +kubebuilder:validation:Pattern="^(0|([0-9]+(?:\\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$"
+	// +kubebuilder:validation:Type=string
+	LastGatherDuration metav1.Duration `json:"lastGatherDuration,omitempty"`
+	// gatherers is a list of active gatherers (and their statuses) in the last gathering.
+	// +listType=atomic
+	// +optional
+	Gatherers []GathererStatus `json:"gatherers,omitempty"`
+}
+
+// insightsReport provides the Insights health check report based on the most
+// recently sent Insights data.
+type InsightsReport struct {
+	// downloadedAt is the time when the last Insights report was downloaded.
+	// An empty value means that there has not been any Insights report downloaded yet and
+	// it usually appears in disconnected clusters (or clusters where Insights data gathering is disabled).
+	// +optional
+	DownloadedAt metav1.Time `json:"downloadedAt,omitempty"`
+	// healthChecks provides basic information about active Insights health checks
+	// in a cluster.
+	// +listType=atomic
+	// +optional
+	HealthChecks []HealthCheck `json:"healthChecks,omitempty"`
+}
+
+// healthCheck represents the attributes of an Insights health check.
+type HealthCheck struct {
+	// description provides a basic description of the health check.
+	// +required
+	// +kubebuilder:validation:MaxLength=2048
+	// +kubebuilder:validation:MinLength=10
+	Description string `json:"description"`
+	// totalRisk is an indicator of the total risk posed by the detected issue;
+	// a combination of impact and likelihood. The values can be from 1 to 4,
+	// and the higher the number, the more important the issue.
+	// +required
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=4
+	TotalRisk int32 `json:"totalRisk"`
+	// advisorURI provides the URL link to the Insights Advisor.
+	// +required
+	// +kubebuilder:validation:Pattern=`^https:\/\/\S+`
+	AdvisorURI string `json:"advisorURI"`
+	// state determines what the current state of the health check is.
+	// Health check is enabled by default and can be disabled
+	// by the user in the Insights advisor user interface.
+	// +required
+	State HealthCheckState `json:"state"`
+}
+
+// healthCheckState provides information about the status of the
+// health check (for example, the health check may be marked as disabled by the user).
+// +kubebuilder:validation:Enum:=Enabled;Disabled
+type HealthCheckState string
+
+const (
+	// enabled marks the health check as enabled
+	HealthCheckEnabled HealthCheckState = "Enabled"
+	// disabled marks the health check as disabled
+	HealthCheckDisabled HealthCheckState = "Disabled"
+)
+
+// gathererStatus represents information about a particular
+// data gatherer.
+type GathererStatus struct {
+	// conditions provide details on the status of each gatherer.
+	// +listType=atomic
+	// +required
+	// +kubebuilder:validation:MinItems=1
+	Conditions []metav1.Condition `json:"conditions"`
+	// name is the name of the gatherer.
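+	// A hypothetical example value is "clusterconfig/authentication"; the
+	// authoritative set of gatherer names is reported by the Insights Operator itself.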
+	// +required
+	// +kubebuilder:validation:MaxLength=256
+	// +kubebuilder:validation:MinLength=5
+	Name string `json:"name"`
+	// lastGatherDuration represents the time spent gathering.
+	// +required
+	// +kubebuilder:validation:Type=string
+	// +kubebuilder:validation:Pattern="^(([0-9]+(?:\\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$"
+	LastGatherDuration metav1.Duration `json:"lastGatherDuration"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InsightsOperatorList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type InsightsOperatorList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []InsightsOperator `json:"items"`
+}
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go
new file mode 100644
index 000000000..7d468755a
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go
@@ -0,0 +1,81 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=kubeapiservers,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_20,operatorName=kube-apiserver,operatorOrdering=01
+
+// KubeAPIServer provides information to configure an operator to manage kube-apiserver.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeAPIServer struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec is the specification of the desired behavior of the Kubernetes API Server
+	// +required
+	Spec KubeAPIServerSpec `json:"spec"`
+
+	// status is the most recently observed status of the Kubernetes API Server
+	// +optional
+	Status KubeAPIServerStatus `json:"status"`
+}
+
+type KubeAPIServerSpec struct {
+	StaticPodOperatorSpec `json:",inline"`
+}
+
+type KubeAPIServerStatus struct {
+	StaticPodOperatorStatus `json:",inline"`
+
+	// serviceAccountIssuers tracks the history of used service account issuers.
+	// The item without an expiration time represents the currently used service account issuer.
+	// The other items represent service account issuers that were used previously and are still being trusted.
+	// The default expiration for the items is set by the platform and defaults to 24h.
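+	// For illustration only (the issuer URLs below are hypothetical), the
+	// tracked history might render as:
+	//
+	//	serviceAccountIssuers:
+	//	- name: https://kubernetes.default.svc          # current issuer, no expirationTime
+	//	- name: https://old-issuer.example.com          # previously used, trusted until pruned
+	//	  expirationTime: "2025-01-01T00:00:00Z"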
+ // see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection + // +optional + ServiceAccountIssuers []ServiceAccountIssuerStatus `json:"serviceAccountIssuers,omitempty"` +} + +type ServiceAccountIssuerStatus struct { + // name is the name of the service account issuer + // --- + // + This value comes from the serviceAccountIssuer field on the authentication.config.openshift.io/v1 resource. + // + As the authentication field is not validated, we cannot apply validation here else this may cause the controller + // + to error when trying to update this status field. + Name string `json:"name"` + + // expirationTime is the time after which this service account issuer will be pruned and removed from the trusted list + // of service account issuers. + // +optional + ExpirationTime *metav1.Time `json:"expirationTime,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeAPIServerList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type KubeAPIServerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []KubeAPIServer `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go new file mode 100644 index 000000000..ee104aa50 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go @@ -0,0 +1,67 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=kubecontrollermanagers,scope=Cluster,categories=coreoperators +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475 +// +openshift:file-pattern=cvoRunLevel=0000_25,operatorName=kube-controller-manager,operatorOrdering=01 + +// KubeControllerManager provides information to configure an operator to manage kube-controller-manager. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type KubeControllerManager struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Kubernetes Controller Manager + // +required + Spec KubeControllerManagerSpec `json:"spec"` + + // status is the most recently observed status of the Kubernetes Controller Manager + // +optional + Status KubeControllerManagerStatus `json:"status"` +} + +type KubeControllerManagerSpec struct { + StaticPodOperatorSpec `json:",inline"` + + // useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only + // enough certificates to validate service serving certificates. + // Once set to true, it cannot be set to false. + // Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will + // only have the more secure content. + // +kubebuilder:default=false + UseMoreSecureServiceCA bool `json:"useMoreSecureServiceCA"` +} + +type KubeControllerManagerStatus struct { + StaticPodOperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeControllerManagerList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type KubeControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []KubeControllerManager `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go new file mode 100644 index 000000000..f3add4910 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go @@ -0,0 +1,56 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=kubestorageversionmigrators,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/503 +// +openshift:file-pattern=cvoRunLevel=0000_40,operatorName=kube-storage-version-migrator,operatorOrdering=00 + +// KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type KubeStorageVersionMigrator struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // +required + Spec KubeStorageVersionMigratorSpec `json:"spec"` + // +optional + Status KubeStorageVersionMigratorStatus `json:"status"` +} + +type KubeStorageVersionMigratorSpec struct { + OperatorSpec `json:",inline"` +} + +type KubeStorageVersionMigratorStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeStorageVersionMigratorList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type KubeStorageVersionMigratorList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []KubeStorageVersionMigrator `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go new file mode 100644 index 000000000..2d88bcd77 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go @@ -0,0 +1,519 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=machineconfigurations,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1453 +// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=machine-config,operatorOrdering=01 + +// MachineConfiguration provides information to configure an operator to manage Machine Configuration. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type MachineConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Machine Config Operator + // +required + Spec MachineConfigurationSpec `json:"spec"` + + // status is the most recently observed status of the Machine Config Operator + // +optional + Status MachineConfigurationStatus `json:"status"` +} + +type MachineConfigurationSpec struct { + StaticPodOperatorSpec `json:",inline"` + + // TODO(jkyros): This is where we put our knobs and dials + + // managedBootImages allows configuration for the management of boot images for machine + // resources within the cluster. This configuration allows users to select resources that should + // be updated to the latest boot images during cluster upgrades, ensuring that new machines + // always boot with the current cluster version's boot image. When omitted, this means no opinion + // and the platform is left to choose a reasonable default, which is subject to change over time. 
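+	// A minimal illustrative sketch (values are examples only, using the
+	// MachineManager fields defined later in this file):
+	//
+	//	managedBootImages:
+	//	  machineManagers:
+	//	  - resource: machinesets
+	//	    apiGroup: machine.openshift.io
+	//	    selection:
+	//	      mode: All
+	//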
+	// The default for each machine manager mode is All for GCP and AWS platforms, and None for all
+	// other platforms.
+	// +openshift:enable:FeatureGate=ManagedBootImages
+	// +optional
+	ManagedBootImages ManagedBootImages `json:"managedBootImages"`
+
+	// nodeDisruptionPolicy allows an admin to set granular node disruption actions for
+	// MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow
+	// for less downtime when doing small configuration updates to the cluster. This configuration
+	// has no effect on cluster upgrades which will still incur node disruption where required.
+	// +optional
+	NodeDisruptionPolicy NodeDisruptionPolicyConfig `json:"nodeDisruptionPolicy"`
+}
+
+type MachineConfigurationStatus struct {
+	// observedGeneration is the last generation change you've dealt with
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// conditions is a list of conditions and their status
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+	// Previously there was a StaticPodOperatorStatus here for legacy reasons. Many of the fields within
+	// it are no longer relevant for the MachineConfiguration CRD's functions. The remaining fields below
+	// were tombstoned when StaticPodOperatorStatus was lifted out. To avoid conflicts with
+	// serialisation, the following field names may never be used again.
+
+	// Tombstone: legacy field from StaticPodOperatorStatus
+	// Version string `json:"version,omitempty"`
+
+	// Tombstone: legacy field from StaticPodOperatorStatus
+	// ReadyReplicas int32 `json:"readyReplicas"`
+
+	// Tombstone: legacy field from StaticPodOperatorStatus
+	// Generations []GenerationStatus `json:"generations,omitempty"`
+
+	// Tombstone: legacy field from StaticPodOperatorStatus
+	// LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"`
+
+	// Tombstone: legacy field from StaticPodOperatorStatus
+	// LatestAvailableRevisionReason string `json:"latestAvailableRevisionReason,omitempty"`
+
+	// Tombstone: legacy field from StaticPodOperatorStatus
+	// NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"`
+
+	// nodeDisruptionPolicyStatus reflects the latest cluster-validated policies,
+	// which will be used by the Machine Config Daemon during future node updates.
+	// +optional
+	NodeDisruptionPolicyStatus NodeDisruptionPolicyStatus `json:"nodeDisruptionPolicyStatus"`
+
+	// managedBootImagesStatus reflects the latest cluster-validated boot image configuration,
+	// which will be used by the Machine Config Controller while performing boot image updates.
+	// +openshift:enable:FeatureGate=ManagedBootImages
+	// +optional
+	ManagedBootImagesStatus ManagedBootImages `json:"managedBootImagesStatus"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineConfigurationList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type MachineConfigurationList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	// items contains the items
+	Items []MachineConfiguration `json:"items"`
+}
+
+type ManagedBootImages struct {
+	// machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator
+	// will watch for changes to this list. Only one entry is permitted per type of machine management resource.
+	// +optional
+	// +listType=map
+	// +listMapKey=resource
+	// +listMapKey=apiGroup
+	// +kubebuilder:validation:MaxItems=5
+	MachineManagers []MachineManager `json:"machineManagers"`
+}
+
+// MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information
+// such as the resource type and the API Group of the resource. It also provides granular control via the selection field.
type MachineManager struct {
+	// resource is the machine management resource's type.
+	// The only current valid value is machinesets.
+	// machinesets means that the machine manager will only register resources of the kind MachineSet.
+	// +required
+	Resource MachineManagerMachineSetsResourceType `json:"resource"`
+
+	// apiGroup is name of the APIGroup that the machine management resource belongs to.
+	// The only current valid value is machine.openshift.io.
+	// machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group.
+	// +required
+	APIGroup MachineManagerMachineSetsAPIGroupType `json:"apiGroup"`
+
+	// selection allows granular control of the machine management resources that will be registered for boot image updates.
+	// +required
+	Selection MachineManagerSelector `json:"selection"`
+}
+
+// +kubebuilder:validation:XValidation:rule="has(self.mode) && self.mode == 'Partial' ? has(self.partial) : !has(self.partial)",message="partial is required when mode is Partial, and forbidden otherwise"
+// +union
+type MachineManagerSelector struct {
+	// mode determines how machine managers will be selected for updates.
+	// Valid values are All and Partial.
+	// All means that every resource matched by the machine manager will be updated.
+	// Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated.
+	// None means that no resources matched by the machine manager will be updated.
+	// +unionDiscriminator
+	// +required
+	Mode MachineManagerSelectorMode `json:"mode"`
+
+	// partial provides label selector(s) that can be used to match machine management resources.
+	// Only permitted when mode is set to "Partial".
+	// +optional
+	Partial *PartialSelector `json:"partial,omitempty"`
+}
+
+// PartialSelector provides label selector(s) that can be used to match machine management resources.
+type PartialSelector struct {
+	// machineResourceSelector is a label selector that can be used to select machine resources like MachineSets.
+	// +required
+	MachineResourceSelector *metav1.LabelSelector `json:"machineResourceSelector,omitempty"`
+}
+
+// MachineManagerSelectorMode is a string enum used in the MachineManagerSelector union discriminator.
+// +kubebuilder:validation:Enum:="All";"Partial";"None"
+type MachineManagerSelectorMode string
+
+const (
+	// All represents a configuration mode that registers all resources specified by the parent MachineManager for boot image updates.
+	All MachineManagerSelectorMode = "All"
+
+	// Partial represents a configuration mode that will register resources specified by the parent MachineManager only
+	// if they match with the label selector.
+	Partial MachineManagerSelectorMode = "Partial"
+
+	// None represents a configuration mode that excludes all resources specified by the parent MachineManager from boot image updates.
+	None MachineManagerSelectorMode = "None"
+)
+
+// MachineManagerMachineSetsResourceType is a string enum used in the MachineManager type to describe the resource
+// type to be registered.
+// +kubebuilder:validation:Enum:="machinesets"
+type MachineManagerMachineSetsResourceType string
+
+const (
+	// MachineSets represents the MachineSet resource type, which manages a group of machines and belongs to the OpenShift machine API group.
+	MachineSets MachineManagerMachineSetsResourceType = "machinesets"
+)
+
+// MachineManagerMachineSetsAPIGroupType is a string enum used in the MachineManager type to describe the APIGroup
+// of the resource type being registered.
+// +kubebuilder:validation:Enum:="machine.openshift.io"
+type MachineManagerMachineSetsAPIGroupType string
+
+const (
+	// MachineAPI represents the traditional MAPI Group that a MachineSet may belong to.
+	// This feature only supports MAPI machinesets at this time.
+	MachineAPI MachineManagerMachineSetsAPIGroupType = "machine.openshift.io"
+)
+
+type NodeDisruptionPolicyStatus struct {
+	// clusterPolicies is a merge of cluster default and user provided node disruption policies.
+	// +optional
+	ClusterPolicies NodeDisruptionPolicyClusterStatus `json:"clusterPolicies"`
+}
+
+// NodeDisruptionPolicyConfig is the overall spec definition for files/units/sshkeys
+type NodeDisruptionPolicyConfig struct {
+	// files is a list of MachineConfig file definitions and the actions to take on changes to those paths.
+	// This list supports a maximum of 50 entries.
+	// +optional
+	// +listType=map
+	// +listMapKey=path
+	// +kubebuilder:validation:MaxItems=50
+	Files []NodeDisruptionPolicySpecFile `json:"files"`
+	// units is a list of MachineConfig unit definitions and the actions to take on changes to those services.
+	// This list supports a maximum of 50 entries.
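+	// An illustrative sketch of a units entry (hypothetical values; "crio.service"
+	// is the example service name used elsewhere in this file):
+	//
+	//	units:
+	//	- name: crio.service
+	//	  actions:
+	//	  - type: Reload
+	//	    reload:
+	//	      serviceName: crio.service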
+	// +optional
+	// +listType=map
+	// +listMapKey=name
+	// +kubebuilder:validation:MaxItems=50
+	Units []NodeDisruptionPolicySpecUnit `json:"units"`
+	// sshkey maps to the ignition.sshkeys field in the MachineConfig object; defining an action for this
+	// will apply to all sshkey changes in the cluster
+	// +optional
+	SSHKey NodeDisruptionPolicySpecSSHKey `json:"sshkey"`
+}
+
+// NodeDisruptionPolicyClusterStatus is the type for the status object, rendered by the controller as a
+// merge of cluster defaults and user provided policies
+type NodeDisruptionPolicyClusterStatus struct {
+	// files is a list of MachineConfig file definitions and the actions to take on changes to those paths.
+	// +optional
+	// +listType=map
+	// +listMapKey=path
+	// +kubebuilder:validation:MaxItems=100
+	Files []NodeDisruptionPolicyStatusFile `json:"files,omitempty"`
+	// units is a list of MachineConfig unit definitions and the actions to take on changes to those services.
+	// +optional
+	// +listType=map
+	// +listMapKey=name
+	// +kubebuilder:validation:MaxItems=100
+	Units []NodeDisruptionPolicyStatusUnit `json:"units,omitempty"`
+	// sshkey is the overall sshkey MachineConfig definition
+	// +optional
+	SSHKey NodeDisruptionPolicyStatusSSHKey `json:"sshkey,omitempty"`
+}
+
+// NodeDisruptionPolicySpecFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object
+type NodeDisruptionPolicySpecFile struct {
+	// path is the location of a file being managed through a MachineConfig.
+	// The Actions in the policy will apply to changes to the file at this path.
+	// +required
+	Path string `json:"path"`
+	// actions represents the series of commands to be executed on changes to the file at
+	// the corresponding file path. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicySpecAction `json:"actions"`
+}
+
+// NodeDisruptionPolicyStatusFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object
+type NodeDisruptionPolicyStatusFile struct {
+	// path is the location of a file being managed through a MachineConfig.
+	// The Actions in the policy will apply to changes to the file at this path.
+	// +required
+	Path string `json:"path"`
+	// actions represents the series of commands to be executed on changes to the file at
+	// the corresponding file path. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicyStatusAction `json:"actions"`
+}
+
+// NodeDisruptionPolicySpecUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object
+type NodeDisruptionPolicySpecUnit struct {
+	// name represents the service name of a systemd service managed through a MachineConfig
+	// Actions specified will be applied for changes to the named service.
+	// Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+	// ${NAME} must be at least 1 character long and can only consist of letters, digits, ":", "-", "_", ".", and "\".
+	// ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
+	// +required
+	Name NodeDisruptionPolicyServiceName `json:"name"`
+
+	// actions represents the series of commands to be executed on changes to the file at
+	// the corresponding file path. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicySpecAction `json:"actions"`
+}
+
+// NodeDisruptionPolicyStatusUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object
+type NodeDisruptionPolicyStatusUnit struct {
+	// name represents the service name of a systemd service managed through a MachineConfig
+	// Actions specified will be applied for changes to the named service.
+	// Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+	// ${NAME} must be at least 1 character long and can only consist of letters, digits, ":", "-", "_", ".", and "\".
+	// ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
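+	// For example, "crio.service" and "kubelet.service" both satisfy this format.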
+	// +required
+	Name NodeDisruptionPolicyServiceName `json:"name"`
+
+	// actions represents the series of commands to be executed on changes to the file at
+	// the corresponding file path. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicyStatusAction `json:"actions"`
+}
+
+// NodeDisruptionPolicySpecSSHKey is actions to take for any SSHKey change and is used in the NodeDisruptionPolicyConfig object
+type NodeDisruptionPolicySpecSSHKey struct {
+	// actions represents the series of commands to be executed on changes to the file at
+	// the corresponding file path. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicySpecAction `json:"actions"`
+}
+
+// NodeDisruptionPolicyStatusSSHKey is actions to take for any SSHKey change and is used in the NodeDisruptionPolicyClusterStatus object
+type NodeDisruptionPolicyStatusSSHKey struct {
+	// actions represents the series of commands to be executed on changes to the file at
+	// the corresponding file path. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicyStatusAction `json:"actions"`
+}
+
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Reload' ? has(self.reload) : !has(self.reload)",message="reload is required when type is Reload, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Restart' ? has(self.restart) : !has(self.restart)",message="restart is required when type is Restart, and forbidden otherwise"
+// +union
+type NodeDisruptionPolicySpecAction struct {
+	// type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed
+	// Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None.
+	// reload/restart requires a corresponding service target specified in the reload/restart field.
+	// Other values require no further configuration.
+	// +unionDiscriminator
+	// +required
+	Type NodeDisruptionPolicySpecActionType `json:"type"`
+	// reload specifies the service to reload, only valid if type is reload
+	// +optional
+	Reload *ReloadService `json:"reload,omitempty"`
+	// restart specifies the service to restart, only valid if type is restart
+	// +optional
+	Restart *RestartService `json:"restart,omitempty"`
+}
+
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Reload' ? has(self.reload) : !has(self.reload)",message="reload is required when type is Reload, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Restart' ? has(self.restart) : !has(self.restart)",message="restart is required when type is Restart, and forbidden otherwise"
+// +union
+type NodeDisruptionPolicyStatusAction struct {
+	// type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed
+	// Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special.
+	// reload/restart requires a corresponding service target specified in the reload/restart field.
+	// Other values require no further configuration.
+	// +unionDiscriminator
+	// +required
+	Type NodeDisruptionPolicyStatusActionType `json:"type"`
+	// reload specifies the service to reload, only valid if type is reload
+	// +optional
+	Reload *ReloadService `json:"reload,omitempty"`
+	// restart specifies the service to restart, only valid if type is restart
+	// +optional
+	Restart *RestartService `json:"restart,omitempty"`
+}
+
+// ReloadService allows the user to specify the services to be reloaded
+type ReloadService struct {
+	// serviceName is the full name (e.g. crio.service) of the service to be reloaded
+	// Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+	// ${NAME} must be at least 1 character long and can only consist of letters, digits, ":", "-", "_", ".", and "\".
+	// ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
+	// +required
+	ServiceName NodeDisruptionPolicyServiceName `json:"serviceName"`
+}
+
+// RestartService allows the user to specify the services to be restarted
+type RestartService struct {
+	// serviceName is the full name (e.g. crio.service) of the service to be restarted
+	// Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+	// ${NAME} must be at least 1 character long and can only consist of letters, digits, ":", "-", "_", ".", and "\".
+	// ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
+	// +required
+	ServiceName NodeDisruptionPolicyServiceName `json:"serviceName"`
+}
+
+// NodeDisruptionPolicySpecActionType is a string enum used in a NodeDisruptionPolicySpecAction object. It describes an action to be performed.
+// +kubebuilder:validation:Enum:="Reboot";"Drain";"Reload";"Restart";"DaemonReload";"None"
+type NodeDisruptionPolicySpecActionType string
+
+// +kubebuilder:validation:XValidation:rule=`self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')`, message="Invalid ${SERVICETYPE} in service name. Expected format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\",\".snapshot\", \".slice\" or \".scope\"."
+// +kubebuilder:validation:XValidation:rule=`self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')`, message="Invalid ${NAME} in service name. Expected format is ${NAME}${SERVICETYPE}, where {NAME} must be at least 1 character long and can only consist of letters, digits, \":\", \"-\", \"_\", \".\", and \"\\\""
+// +kubebuilder:validation:MaxLength=255
+type NodeDisruptionPolicyServiceName string
+
+const (
+	// Reboot represents an action that will cause nodes to be rebooted. This is the default action by the MCO
+	// if a reboot policy is not found for a change/update being performed by the MCO.
+	RebootSpecAction NodeDisruptionPolicySpecActionType = "Reboot"
+
+	// Drain represents an action that will cause nodes to be drained of their workloads.
+	DrainSpecAction NodeDisruptionPolicySpecActionType = "Drain"
+
+	// Reload represents an action that will cause nodes to reload the service described by the Target field.
+	ReloadSpecAction NodeDisruptionPolicySpecActionType = "Reload"
+
+	// Restart represents an action that will cause nodes to restart the service described by the Target field.
+	RestartSpecAction NodeDisruptionPolicySpecActionType = "Restart"
+
+	// DaemonReload represents an action that TBD
+	DaemonReloadSpecAction NodeDisruptionPolicySpecActionType = "DaemonReload"
+
+	// None represents an action for which no handling is required by the MCO.
+	NoneSpecAction NodeDisruptionPolicySpecActionType = "None"
+)
+
+// NodeDisruptionPolicyStatusActionType is a string enum used in a NodeDisruptionPolicyStatusAction object. It describes an action to be performed.
+// The key difference of this type from NodeDisruptionPolicySpecActionType is that there is an additional SpecialStatusAction value in this enum. This will only be
+// used by the MCO's controller to indicate some internal actions. They are not part of the NodeDisruptionPolicyConfig object and cannot be set by the user.
+// +kubebuilder:validation:Enum:="Reboot";"Drain";"Reload";"Restart";"DaemonReload";"None";"Special"
+type NodeDisruptionPolicyStatusActionType string
+
+const (
+	// Reboot represents an action that will cause nodes to be rebooted. This is the default action by the MCO
+	// if a reboot policy is not found for a change/update being performed by the MCO.
+	RebootStatusAction NodeDisruptionPolicyStatusActionType = "Reboot"
+
+	// Drain represents an action that will cause nodes to be drained of their workloads.
+	DrainStatusAction NodeDisruptionPolicyStatusActionType = "Drain"
+
+	// Reload represents an action that will cause nodes to reload the service described by the Target field.
+	ReloadStatusAction NodeDisruptionPolicyStatusActionType = "Reload"
+
+	// Restart represents an action that will cause nodes to restart the service described by the Target field.
+	RestartStatusAction NodeDisruptionPolicyStatusActionType = "Restart"
+
+	// DaemonReload represents an action that will cause nodes to reload their systemd configuration
+	// (the equivalent of systemctl daemon-reload).
+	DaemonReloadStatusAction NodeDisruptionPolicyStatusActionType = "DaemonReload"
+
+	// None represents an action for which no handling is required by the MCO.
+	NoneStatusAction NodeDisruptionPolicyStatusActionType = "None"
+
+	// Special represents an action that is internal to the MCO, and is not allowed in user defined NodeDisruption policies.
+	SpecialStatusAction NodeDisruptionPolicyStatusActionType = "Special"
+)
+
+// These strings will be used for MachineConfiguration Status conditions.
+const (
+	// MachineConfigurationBootImageUpdateDegraded means that the MCO ran into an error while reconciling boot images. This
+	// will cause the clusteroperators.config.openshift.io/machine-config to degrade. This condition will indicate the cause
+	// of the degrade, the progress of the update and the generation of the boot images configmap that it degraded on.
+	MachineConfigurationBootImageUpdateDegraded string = "BootImageUpdateDegraded"
+
+	// MachineConfigurationBootImageUpdateProgressing means that the MCO is in the process of reconciling boot images. This
+	// will cause the clusteroperators.config.openshift.io/machine-config to be in a Progressing state. This condition will
+	// indicate the progress of the update and the generation of the boot images configmap that triggered this update.
+	MachineConfigurationBootImageUpdateProgressing string = "BootImageUpdateProgressing"
+)
diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_network.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_network.go
new file mode 100644
index 000000000..111240eec
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_network.go
@@ -0,0 +1,900 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=networks,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_70,operatorName=network,operatorOrdering=01
+
+// Network describes the cluster's desired network configuration. It is
+// consumed by the cluster-network-operator.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
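+//
+// An illustrative (hand-written, non-normative) sketch of constructing the
+// singleton object, which is conventionally named "cluster":
+//
+//	net := Network{
+//		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
+//		Spec: NetworkSpec{
+//			DefaultNetwork: DefaultNetworkDefinition{Type: NetworkTypeOVNKubernetes},
+//		},
+//	}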
+// +k8s:openapi-gen=true +// +openshift:compatibility-gen:level=1 +type Network struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NetworkSpec `json:"spec,omitempty"` + Status NetworkStatus `json:"status,omitempty"` +} + +// NetworkStatus is detailed operator status, which is distilled +// up to the Network clusteroperator object. +type NetworkStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkList contains a list of Network configurations +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type NetworkList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + Items []Network `json:"items"` +} + +// NetworkSpec is the top-level network configuration object. +// +kubebuilder:validation:XValidation:rule="!has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding) || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == oldSelf.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Restricted' || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Global'",message="invalid value for IPForwarding, valid values are 'Restricted' or 'Global'" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteAdvertisements,rule="(has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != 'Enabled'",message="Route advertisements cannot be Enabled if 'FRR' routing capability provider is not available" +type NetworkSpec struct { + OperatorSpec `json:",inline"` + + // clusterNetwork is the IP address pool to use for pod IPs. + // Some network providers support multiple ClusterNetworks. + // Others only support one. This is equivalent to the cluster-cidr. + // +listType=atomic + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` + + // serviceNetwork is the ip address pool to use for Service IPs + // Currently, all existing network providers only support a single value + // here, but this is an array to allow for growth. + // +listType=atomic + ServiceNetwork []string `json:"serviceNetwork"` + + // defaultNetwork is the "default" network that all pods will receive + DefaultNetwork DefaultNetworkDefinition `json:"defaultNetwork"` + + // additionalNetworks is a list of extra networks to make available to pods + // when multiple networks are enabled. 
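+	//
+	// An illustrative (non-normative) example entry exposing one extra macvlan
+	// network that pods can then request by name:
+	//
+	//	AdditionalNetworks: []AdditionalNetworkDefinition{{
+	//		Type: NetworkTypeSimpleMacvlan,
+	//		Name: "macvlan-net",
+	//	}}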
+	// +listType=map
+	// +listMapKey=name
+	AdditionalNetworks []AdditionalNetworkDefinition `json:"additionalNetworks,omitempty"`
+
+	// disableMultiNetwork defaults to 'false', which enables the pod multi-networking capability.
+	// When set to 'true' at cluster install time, the components that enable the pod multi-networking
+	// capability (typically the Multus CNI and the network-attachment-definition CRD) are not installed.
+	// Setting the parameter to 'true' might be useful when you need to install third-party CNI plugins,
+	// but these plugins are not supported by Red Hat. Changing the parameter value as a post-installation
+	// cluster task has no effect.
+	DisableMultiNetwork *bool `json:"disableMultiNetwork,omitempty"`
+
+	// useMultiNetworkPolicy enables a controller which allows for
+	// MultiNetworkPolicy objects to be used on additional networks as
+	// created by Multus CNI. MultiNetworkPolicy objects are similar to NetworkPolicy
+	// objects, but NetworkPolicy objects only apply to the primary interface.
+	// With MultiNetworkPolicy, you can control the traffic that a pod can receive
+	// over the secondary interfaces. If unset, this property defaults to 'false'
+	// and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is
+	// 'true' then the value of this field is ignored.
+	UseMultiNetworkPolicy *bool `json:"useMultiNetworkPolicy,omitempty"`
+
+	// deployKubeProxy specifies whether or not a standalone kube-proxy should
+	// be deployed by the operator. Some network providers include kube-proxy
+	// or similar functionality. If unset, the plugin will attempt to select
+	// the correct value, which is false when ovn-kubernetes is used and true
+	// otherwise.
+	// +optional
+	DeployKubeProxy *bool `json:"deployKubeProxy,omitempty"`
+
+	// disableNetworkDiagnostics specifies whether PodNetworkConnectivityCheck
+	// CRs from a test pod to every node, apiserver and LB should be disabled.
+	// If unset, this property defaults to 'false' and network diagnostics is enabled.
+	// Setting this to 'true' would reduce the additional load of the pods performing the checks.
+	// +optional
+	// +kubebuilder:default:=false
+	DisableNetworkDiagnostics bool `json:"disableNetworkDiagnostics"`
+
+	// kubeProxyConfig lets us configure desired proxy configuration, if
+	// deployKubeProxy is true. If not specified, sensible defaults will be chosen by
+	// OpenShift directly.
+	KubeProxyConfig *ProxyConfig `json:"kubeProxyConfig,omitempty"`
+
+	// exportNetworkFlows enables and configures the export of network flow metadata from the pod network
+	// by using protocols NetFlow, SFlow or IPFIX. Currently only supported on the OVN-Kubernetes plugin.
+	// If unset, flows will not be exported to any collector.
+	// +optional
+	ExportNetworkFlows *ExportNetworkFlows `json:"exportNetworkFlows,omitempty"`
+
+	// migration enables and configures cluster network migration, for network changes
+	// that cannot be made instantly.
+	// +optional
+	Migration *NetworkMigration `json:"migration,omitempty"`
+
+	// additionalRoutingCapabilities describes components and relevant
+	// configuration providing additional routing capabilities. When set, it
+	// enables such components and the usage of the routing capabilities they
+	// provide for the machine network. Upstream operators, like the MetalLB
+	// operator, requiring these capabilities may rely on, or automatically set,
+	// this attribute.
Network plugins may leverage advanced routing + // capabilities acquired through the enablement of these components but may + // require specific configuration on their side to do so; refer to their + // respective documentation and configuration options. + // +openshift:enable:FeatureGate=AdditionalRoutingCapabilities + // +optional + AdditionalRoutingCapabilities *AdditionalRoutingCapabilities `json:"additionalRoutingCapabilities,omitempty"` +} + +// NetworkMigrationMode is an enumeration of the possible mode of the network migration +// Valid values are "Live", "Offline" and omitted. +// DEPRECATED: network type migration is no longer supported. +// +kubebuilder:validation:Enum:=Live;Offline;"" +type NetworkMigrationMode string + +const ( + // A "Live" migration operation will not cause service interruption by migrating the CNI of each node one by one. The cluster network will work as normal during the network migration. + // DEPRECATED: network type migration is no longer supported. + LiveNetworkMigrationMode NetworkMigrationMode = "Live" + // An "Offline" migration operation will cause service interruption. During an "Offline" migration, two rounds of node reboots are required. The cluster network will be malfunctioning during the network migration. + // DEPRECATED: network type migration is no longer supported. + OfflineNetworkMigrationMode NetworkMigrationMode = "Offline" +) + +// NetworkMigration represents the cluster network migration configuration. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkLiveMigration,rule="!has(self.mtu) || !has(self.networkType) || self.networkType == \"\" || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration" +type NetworkMigration struct { + // mtu contains the MTU migration configuration. Set this to allow changing + // the MTU values for the default network. If unset, the operation of + // changing the MTU for the default network will be rejected. + // +optional + MTU *MTUMigration `json:"mtu,omitempty"` + + // networkType was previously used when changing the default network type. + // DEPRECATED: network type migration is no longer supported, and setting + // this to a non-empty value will result in the network operator rejecting + // the configuration. + // +optional + NetworkType string `json:"networkType,omitempty"` + + // features was previously used to configure which network plugin features + // would be migrated in a network type migration. + // DEPRECATED: network type migration is no longer supported, and setting + // this to a non-empty value will result in the network operator rejecting + // the configuration. + // +optional + Features *FeaturesMigration `json:"features,omitempty"` + + // mode indicates the mode of network type migration. + // DEPRECATED: network type migration is no longer supported, and setting + // this to a non-empty value will result in the network operator rejecting + // the configuration. + // +optional + Mode NetworkMigrationMode `json:"mode,omitempty"` +} + +type FeaturesMigration struct { + // egressIP specified whether or not the Egress IP configuration was migrated. + // DEPRECATED: network type migration is no longer supported. + // +optional + // +kubebuilder:default:=true + EgressIP bool `json:"egressIP,omitempty"` + // egressFirewall specified whether or not the Egress Firewall configuration was migrated. + // DEPRECATED: network type migration is no longer supported. 
+	// +optional
+	// +kubebuilder:default:=true
+	EgressFirewall bool `json:"egressFirewall,omitempty"`
+	// multicast specified whether or not the multicast configuration was migrated.
+	// DEPRECATED: network type migration is no longer supported.
+	// +optional
+	// +kubebuilder:default:=true
+	Multicast bool `json:"multicast,omitempty"`
+}
+
+// MTUMigration contains information about MTU migration.
+type MTUMigration struct {
+	// network contains information about MTU migration for the default network.
+	// Migrations are only allowed to MTU values lower than the machine's uplink
+	// MTU by the minimum appropriate offset.
+	// +optional
+	Network *MTUMigrationValues `json:"network,omitempty"`
+
+	// machine contains MTU migration configuration for the machine's uplink.
+	// Needs to be migrated along with the default network MTU unless the
+	// current uplink MTU already accommodates the default network MTU.
+	// +optional
+	Machine *MTUMigrationValues `json:"machine,omitempty"`
+}
+
+// MTUMigrationValues contains the values for an MTU migration.
+type MTUMigrationValues struct {
+	// to is the MTU to migrate to.
+	// +kubebuilder:validation:Minimum=0
+	To *uint32 `json:"to"`
+
+	// from is the MTU to migrate from.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	From *uint32 `json:"from,omitempty"`
+}
+
+// ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size
+// HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If
+// the HostPrefix field is not used by the plugin, it can be left unset.
+// Not all network providers support multiple ClusterNetworks
+type ClusterNetworkEntry struct {
+	CIDR string `json:"cidr"`
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	HostPrefix uint32 `json:"hostPrefix,omitempty"`
+}
+
+// DefaultNetworkDefinition represents a single network plugin's configuration.
+// type must be specified, along with exactly one "Config" that matches the type.
+type DefaultNetworkDefinition struct {
+	// type is the type of network
+	// All NetworkTypes are supported except for NetworkTypeRaw
+	Type NetworkType `json:"type"`
+
+	// openshiftSDNConfig was previously used to configure the openshift-sdn plugin.
+	// DEPRECATED: OpenShift SDN is no longer supported.
+	// +optional
+	OpenShiftSDNConfig *OpenShiftSDNConfig `json:"openshiftSDNConfig,omitempty"`
+
+	// ovnKubernetesConfig configures the ovn-kubernetes plugin.
+	// +optional
+	OVNKubernetesConfig *OVNKubernetesConfig `json:"ovnKubernetesConfig,omitempty"`
+}
+
+// SimpleMacvlanConfig contains configurations for macvlan interface.
+type SimpleMacvlanConfig struct {
+	// master is the host interface to create the macvlan interface from.
+	// If not specified, it will be the default route interface
+	// +optional
+	Master string `json:"master,omitempty"`
+
+	// ipamConfig configures the IPAM module that will be used for IP Address Management (IPAM).
+	// +optional
+	IPAMConfig *IPAMConfig `json:"ipamConfig,omitempty"`
+
+	// mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge
+	// +optional
+	Mode MacvlanMode `json:"mode,omitempty"`
+
+	// mtu is the MTU to use for the macvlan interface. If unset, the host's
+	// kernel will select the value.
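+	//
+	// An illustrative (non-normative) example of a complete configuration; the
+	// interface name and MTU are placeholders:
+	//
+	//	SimpleMacvlanConfig{
+	//		Master:     "eth0",
+	//		Mode:       MacvlanModeBridge,
+	//		MTU:        1500,
+	//		IPAMConfig: &IPAMConfig{Type: IPAMTypeDHCP},
+	//	}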
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MTU uint32 `json:"mtu,omitempty"`
+}
+
+// StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses
+type StaticIPAMAddresses struct {
+	// address is the IP address in CIDR format
+	// +optional
+	Address string `json:"address"`
+	// gateway is IP inside of subnet to designate as the gateway
+	// +optional
+	Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes
+type StaticIPAMRoutes struct {
+	// destination specifies the IP route destination
+	Destination string `json:"destination"`
+	// gateway is the route's next-hop IP address
+	// If unset, a default gateway is assumed (as determined by the CNI plugin).
+	// +optional
+	Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMDNS provides DNS related information for static IPAM
+type StaticIPAMDNS struct {
+	// nameservers specifies the DNS servers to use for IP lookup
+	// +optional
+	// +listType=atomic
+	Nameservers []string `json:"nameservers,omitempty"`
+	// domain configures the domain name (the local domain) used for short hostname lookups
+	// +optional
+	Domain string `json:"domain,omitempty"`
+	// search configures priority ordered search domains for short hostname lookups
+	// +optional
+	// +listType=atomic
+	Search []string `json:"search,omitempty"`
+}
+
+// StaticIPAMConfig contains configurations for static IPAM (IP Address Management)
+type StaticIPAMConfig struct {
+	// addresses configures IP address for the interface
+	// +optional
+	// +listType=atomic
+	Addresses []StaticIPAMAddresses `json:"addresses,omitempty"`
+	// routes configures IP routes for the interface
+	// +optional
+	// +listType=atomic
+	Routes []StaticIPAMRoutes `json:"routes,omitempty"`
+	// dns configures DNS for the interface
+	// +optional
+	DNS *StaticIPAMDNS `json:"dns,omitempty"`
+}
+
+// IPAMConfig contains configurations for IPAM (IP Address Management)
+type IPAMConfig struct {
+	// type is the type of IPAM module that will be used for IP Address Management (IPAM).
+	// The supported values are IPAMTypeDHCP, IPAMTypeStatic
+	Type IPAMType `json:"type"`
+
+	// staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic
+	// +optional
+	StaticIPAMConfig *StaticIPAMConfig `json:"staticIPAMConfig,omitempty"`
+}
+
+// AdditionalNetworkDefinition configures an extra network that is available but not
+// created by default. Instead, pods must request them by name.
+// type must be specified, along with exactly one "Config" that matches the type.
+type AdditionalNetworkDefinition struct {
+	// type is the type of network
+	// The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan
+	Type NetworkType `json:"type"`
+
+	// name is the name of the network. This will be populated in the resulting CRD
+	// This must be unique.
+	// +required
+	Name string `json:"name"`
+
+	// namespace is the namespace of the network. This will be populated in the resulting CRD
+	// If not given the network will be created in the default namespace.
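+	//
+	// An illustrative (non-normative) sketch of a raw definition with an explicit
+	// namespace; the names are placeholders and rawCNIConfig is omitted for brevity:
+	//
+	//	AdditionalNetworkDefinition{
+	//		Type:      NetworkTypeRaw,
+	//		Name:      "example-net",
+	//		Namespace: "example-namespace",
+	//	}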
+	Namespace string `json:"namespace,omitempty"`
+
+	// rawCNIConfig is the raw CNI configuration json to create in the
+	// NetworkAttachmentDefinition CRD
+	RawCNIConfig string `json:"rawCNIConfig,omitempty"`
+
+	// simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan
+	// +optional
+	SimpleMacvlanConfig *SimpleMacvlanConfig `json:"simpleMacvlanConfig,omitempty"`
+}
+
+// OpenShiftSDNConfig was used to configure the OpenShift SDN plugin. It is no longer used.
+type OpenShiftSDNConfig struct {
+	// mode is one of "Multitenant", "Subnet", or "NetworkPolicy"
+	Mode SDNMode `json:"mode"`
+
+	// vxlanPort is the port to use for all vxlan packets. The default is 4789.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	VXLANPort *uint32 `json:"vxlanPort,omitempty"`
+
+	// mtu is the MTU to use for the tunnel interface. Defaults to 1450 if unset.
+	// This must be 50 bytes smaller than the machine's uplink.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MTU *uint32 `json:"mtu,omitempty"`
+
+	// useExternalOpenvswitch used to control whether the operator would deploy an OVS
+	// DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always
+	// run as a system service, and this flag is ignored.
+	// +optional
+	UseExternalOpenvswitch *bool `json:"useExternalOpenvswitch,omitempty"`
+
+	// enableUnidling controls whether or not the service proxy will support idling
+	// and unidling of services. By default, unidling is enabled.
+	EnableUnidling *bool `json:"enableUnidling,omitempty"`
+}
+
+// OVNKubernetesConfig contains the configuration parameters for networks
+// using the ovn-kubernetes network project
+type OVNKubernetesConfig struct {
+	// mtu is the MTU to use for the tunnel interface. This must be 100
+	// bytes smaller than the uplink mtu.
+	// Default is 1400
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MTU *uint32 `json:"mtu,omitempty"`
+	// genevePort is the UDP port to be used by Geneve encapsulation.
+	// Default is 6081
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	GenevePort *uint32 `json:"genevePort,omitempty"`
+	// hybridOverlayConfig configures an additional overlay network for peers that are
+	// not using OVN.
+	// +optional
+	HybridOverlayConfig *HybridOverlayConfig `json:"hybridOverlayConfig,omitempty"`
+	// ipsecConfig enables and configures IPsec for pods on the pod network within the
+	// cluster.
+	// +optional
+	// +kubebuilder:default={"mode": "Disabled"}
+	// +default={"mode": "Disabled"}
+	IPsecConfig *IPsecConfig `json:"ipsecConfig,omitempty"`
+	// policyAuditConfig is the configuration for network policy audit events. If unset,
+	// reported defaults are used.
+	// +optional
+	PolicyAuditConfig *PolicyAuditConfig `json:"policyAuditConfig,omitempty"`
+	// gatewayConfig holds the configuration for node gateway options.
+	// +optional
+	GatewayConfig *GatewayConfig `json:"gatewayConfig,omitempty"`
+	// v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the
+	// default one is already being used by something else. It must not overlap with
+	// any other subnet being used by OpenShift or by the node network. The size of the
+	// subnet must be larger than the number of nodes.
+	// Default is 100.64.0.0/16
+	// +optional
+	V4InternalSubnet string `json:"v4InternalSubnet,omitempty"`
+	// v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the
+	// default one is already being used by something else. It must not overlap with
+	// any other subnet being used by OpenShift or by the node network. The size of the
+	// subnet must be larger than the number of nodes.
+	// Default is fd98::/64
+	// +optional
+	V6InternalSubnet string `json:"v6InternalSubnet,omitempty"`
+	// egressIPConfig holds the configuration for EgressIP options.
+	// +optional
+	EgressIPConfig EgressIPConfig `json:"egressIPConfig,omitempty"`
+	// ipv4 allows users to configure IP settings for IPv4 connections. When omitted,
+	// this means no opinion and the default configuration is used. Check individual
+	// fields within ipv4 for details of default values.
+	// +optional
+	IPv4 *IPv4OVNKubernetesConfig `json:"ipv4,omitempty"`
+	// ipv6 allows users to configure IP settings for IPv6 connections. When omitted,
+	// this means no opinion and the default configuration is used. Check individual
+	// fields within ipv6 for details of default values.
+	// +optional
+	IPv6 *IPv6OVNKubernetesConfig `json:"ipv6,omitempty"`
+
+	// routeAdvertisements determines if the functionality to advertise cluster
+	// network routes through a dynamic routing protocol, such as BGP, is
+	// enabled or not. This functionality is configured through the
+	// ovn-kubernetes RouteAdvertisements CRD. Requires the 'FRR' routing
+	// capability provider to be enabled as an additional routing capability.
+	// Allowed values are "Enabled", "Disabled" and omitted. When omitted, this
+	// means the user has no opinion and the platform is left to choose
+	// reasonable defaults. These defaults are subject to change over time. The
+	// current default is "Disabled".
+	// +openshift:enable:FeatureGate=RouteAdvertisements
+	// +optional
+	RouteAdvertisements RouteAdvertisementsEnablement `json:"routeAdvertisements,omitempty"`
+}
+
+type IPv4OVNKubernetesConfig struct {
+	// internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally
+	// by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect
+	// architecture that connects the cluster routers on each node together to enable
+	// east west traffic. The subnet chosen should not overlap with other networks
+	// specified for OVN-Kubernetes as well as other networks used on the host.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable
+	// default which is subject to change over time.
+	// The current default subnet is 100.88.0.0/16
+	// The subnet must be large enough to accommodate one IP per node in your cluster
+	// The value must be in proper IPV4 CIDR format
+	// +kubebuilder:validation:MaxLength=18
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 30",message="subnet must be in the range /0 to /30 inclusive"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
+	// +optional
+	InternalTransitSwitchSubnet string `json:"internalTransitSwitchSubnet,omitempty"`
+	// internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the
+	// default one is already being used by something else. It must not overlap with
+	// any other subnet being used by OpenShift or by the node network. The size of the
+	// subnet must be larger than the number of nodes.
+	// The current default value is 100.64.0.0/16
+	// The subnet must be large enough to accommodate one IP per node in your cluster
+	// The value must be in proper IPV4 CIDR format
+	// +kubebuilder:validation:MaxLength=18
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 30",message="subnet must be in the range /0 to /30 inclusive"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
+	// +optional
+	InternalJoinSubnet string `json:"internalJoinSubnet,omitempty"`
+}
+
+type IPv6OVNKubernetesConfig struct {
+	// internalTransitSwitchSubnet is a v6 subnet in IPV6 CIDR format used internally
+	// by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect
+	// architecture that connects the cluster routers on each node together to enable
+	// east west traffic. The subnet chosen should not overlap with other networks
+	// specified for OVN-Kubernetes as well as other networks used on the host.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable
+	// default which is subject to change over time.
+	// The subnet must be large enough to accommodate one IP per node in your cluster
+	// The current default subnet is fd97::/64
+	// The value must be in proper IPV6 CIDR format
+	// Note that IPV6 dual addresses are not permitted
+	// +kubebuilder:validation:MaxLength=48
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
+	// +optional
+	InternalTransitSwitchSubnet string `json:"internalTransitSwitchSubnet,omitempty"`
+	// internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the
+	// default one is already being used by something else. It must not overlap with
+	// any other subnet being used by OpenShift or by the node network. The size of the
+	// subnet must be larger than the number of nodes.
+	// The subnet must be large enough to accommodate one IP per node in your cluster
+	// The current default value is fd98::/64
+	// The value must be in proper IPV6 CIDR format
+	// Note that IPV6 dual addresses are not permitted
+	// +kubebuilder:validation:MaxLength=48
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
+	// +optional
+	InternalJoinSubnet string `json:"internalJoinSubnet,omitempty"`
+}
+
+type HybridOverlayConfig struct {
+	// hybridClusterNetwork defines a network space given to nodes on an additional overlay network.
+	// +listType=atomic
+	HybridClusterNetwork []ClusterNetworkEntry `json:"hybridClusterNetwork"`
+	// hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network.
+	// Default is 4789
+	// +optional
+	HybridOverlayVXLANPort *uint32 `json:"hybridOverlayVXLANPort,omitempty"`
+}
+
+// +kubebuilder:validation:XValidation:rule="self == oldSelf || has(self.mode)",message="ipsecConfig.mode is required"
+// +kubebuilder:validation:XValidation:rule="has(self.mode) && self.mode == 'Full' ? true : !has(self.full)",message="full is forbidden when mode is not Full"
+// +union
+type IPsecConfig struct {
+	// mode defines the behaviour of the ipsec configuration within the platform.
+	// Valid values are `Disabled`, `External` and `Full`.
+	// When 'Disabled', ipsec will not be enabled at the node level.
+	// When 'External', ipsec is enabled on the node level but requires the user to configure the secure communication parameters.
+	// This mode is for external secure communications and the configuration can be done using the k8s-nmstate operator.
+	// When 'Full', ipsec is configured on the node level and inter-pod secure communication within the cluster is configured.
+	// Note with `Full`, if ipsec is desired for communication with external (to the cluster) entities (such as storage arrays),
+	// this is left to the user to configure.
+	// +kubebuilder:validation:Enum=Disabled;External;Full
+	// +optional
+	// +unionDiscriminator
+	Mode IPsecMode `json:"mode,omitempty"`
+
+	// full defines configuration parameters for the IPsec `Full` mode.
+	// This is permitted only when mode is configured with `Full`,
+	// and forbidden otherwise.
+	// +unionMember,optional
+	// +optional
+	Full *IPsecFullModeConfig `json:"full,omitempty"`
+}
+
+type Encapsulation string
+
+const (
+	// EncapsulationAlways always enables UDP encapsulation regardless of whether NAT is detected.
+	EncapsulationAlways = "Always"
+	// EncapsulationAuto enables UDP encapsulation based on the detection of NAT.
+	EncapsulationAuto = "Auto"
+)
+
+// IPsecFullModeConfig defines configuration parameters for the IPsec `Full` mode.
+// +kubebuilder:validation:MinProperties:=1
+type IPsecFullModeConfig struct {
+	// encapsulation option to configure libreswan on how inter-pod traffic across nodes
+	// is encapsulated to handle NAT traversal. When configured it uses UDP port 4500
+	// for the encapsulation.
+	// Valid values are Always, Auto and omitted.
+	// Always means enable UDP encapsulation regardless of whether NAT is detected.
+	// Auto means enable UDP encapsulation based on the detection of NAT.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable
+	// default, which is subject to change over time. The current default is Auto.
+	// +kubebuilder:validation:Enum:=Always;Auto
+	// +optional
+	Encapsulation Encapsulation `json:"encapsulation,omitempty"`
+}
+
+type IPForwardingMode string
+
+const (
+	// IPForwardingRestricted limits the IP forwarding on OVN-Kube managed interfaces (br-ex, br-ex1) to only required
+	// service and other k8s related traffic
+	IPForwardingRestricted IPForwardingMode = "Restricted"
+
+	// IPForwardingGlobal allows all IP traffic to be forwarded across OVN-Kube managed interfaces
+	IPForwardingGlobal IPForwardingMode = "Global"
+)
+
+// GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides
+type GatewayConfig struct {
+	// routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port
+	// into the host before sending it out. If this is not set, traffic will always egress directly
+	// from OVN to outside without touching the host stack. Setting this to true means hardware
+	// offload will not be supported. Default is false if GatewayConfig is specified.
+	// +kubebuilder:default:=false
+	// +optional
+	RoutingViaHost bool `json:"routingViaHost,omitempty"`
+	// ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex).
+	// By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other
+	// IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across
+	// OVN-Kubernetes managed interfaces, then set this field to "Global".
+	// The supported values are "Restricted" and "Global".
+	// +optional
+	IPForwarding IPForwardingMode `json:"ipForwarding,omitempty"`
+	// ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default
+	// configuration is used. Check individual member fields within ipv4 for details of default values.
+	// +optional
+	IPv4 IPv4GatewayConfig `json:"ipv4,omitempty"`
+	// ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default
+	// configuration is used. Check individual member fields within ipv6 for details of default values.
+	// +optional
+	IPv6 IPv6GatewayConfig `json:"ipv6,omitempty"`
+}
+
+// IPv4GatewayConfig holds the configuration parameters for IPV4 connections in the GatewayConfig for OVN-Kubernetes
+type IPv4GatewayConfig struct {
+	// internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by
+	// ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these
+	// addresses, as well as the shared gateway bridge interface. The values can be changed after
+	// installation. The subnet chosen should not overlap with other networks specified for
+	// OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must
+	// be large enough to accommodate 6 IPs (maximum prefix length /29).
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time.
+	// The current default subnet is 169.254.0.0/17
+	// The value must be in proper IPV4 CIDR format
+	// +kubebuilder:validation:MaxLength=18
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 29",message="subnet must be in the range /0 to /29 inclusive"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
+	// +optional
+	InternalMasqueradeSubnet string `json:"internalMasqueradeSubnet,omitempty"`
+}
+
+// IPv6GatewayConfig holds the configuration parameters for IPV6 connections in the GatewayConfig for OVN-Kubernetes
+type IPv6GatewayConfig struct {
+	// internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by
+	// ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these
+	// addresses, as well as the shared gateway bridge interface. The values can be changed after
+	// installation. The subnet chosen should not overlap with other networks specified for
+	// OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must
+	// be large enough to accommodate 6 IPs (maximum prefix length /125).
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time.
+	// The current default subnet is fd69::/112
+	// Note that IPV6 dual addresses are not permitted
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
+	// +optional
+	InternalMasqueradeSubnet string `json:"internalMasqueradeSubnet,omitempty"`
+}
+
+type ExportNetworkFlows struct {
+	// netFlow defines the NetFlow configuration.
+	// +optional
+	NetFlow *NetFlowConfig `json:"netFlow,omitempty"`
+	// sFlow defines the SFlow configuration.
+	// +optional
+	SFlow *SFlowConfig `json:"sFlow,omitempty"`
+	// ipfix defines IPFIX configuration.
+	// +optional
+	IPFIX *IPFIXConfig `json:"ipfix,omitempty"`
+}
+
+type NetFlowConfig struct {
+	// netFlow defines the NetFlow collectors that will consume the flow data exported from OVS.
+	// It is a list of strings formatted as ip:port with a maximum of ten items
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=10
+	// +listType=atomic
+	Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+type SFlowConfig struct {
+	// sFlowCollectors is a list of strings formatted as ip:port with a maximum of ten items
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=10
+	// +listType=atomic
+	Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+type IPFIXConfig struct {
+	// ipfixCollectors is a list of strings formatted as ip:port with a maximum of ten items
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=10
+	// +listType=atomic
+	Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$`
+type IPPort string
+
+type PolicyAuditConfig struct {
+	// rateLimit is the approximate maximum number of messages to generate per-second per-node. If
+	// unset, the default of 20 msg/sec is used.
+	// +kubebuilder:default=20
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	RateLimit *uint32 `json:"rateLimit,omitempty"`
+
+	// maxFileSize is the max size an ACL_audit log file is allowed to reach before rotation occurs
+	// Units are in MB and the default is 50MB
+	// +kubebuilder:default=50
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	MaxFileSize *uint32 `json:"maxFileSize,omitempty"`
+
+	// maxLogFiles specifies the maximum number of ACL_audit log files that can be present.
+	// +kubebuilder:default=5
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	MaxLogFiles *int32 `json:"maxLogFiles,omitempty"`
+
+	// destination is the location for policy log messages.
+	// Regardless of this config, persistent logs will always be dumped to the host
+	// at /var/log/ovn/.
+	// Additionally, syslog output may be configured as follows.
+	// Valid values are:
+	// - "libc" -> to use the libc syslog() function of the host node's journald process
+	// - "udp:host:port" -> for sending syslog over UDP
+	// - "unix:file" -> for using the UNIX domain socket directly
+	// - "null" -> to discard all messages logged to syslog
+	// The default is "null"
+	// +kubebuilder:default=null
+	// +kubebuilder:pattern='^libc$|^null$|^udp:(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]):([0-9]){0,5}$|^unix:(\/[^\/ ]*)+([^\/\s])$'
+	// +optional
+	Destination string `json:"destination,omitempty"`
+
+	// syslogFacility is the RFC5424 facility for generated messages, e.g. "kern". Default is "local0"
+	// +kubebuilder:default=local0
+	// +kubebuilder:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;clock;ftp;ntp;audit;alert;clock2;local0;local1;local2;local3;local4;local5;local6;local7
+	// +optional
+	SyslogFacility string `json:"syslogFacility,omitempty"`
+}
+
+// NetworkType describes the network plugin type to configure
+type NetworkType string
+
+// ProxyArgumentList is a list of arguments to pass to the kubeproxy process
+// +listType=atomic
+type ProxyArgumentList []string
+
+// ProxyConfig defines the configuration knobs for kubeproxy
+// All of these are optional and have sensible defaults
+type ProxyConfig struct {
+	// An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted
+	// in large clusters for performance reasons, but this is no longer necessary, and there is no reason
+	// to change this from the default value.
+	// Default: 30s
+	IptablesSyncPeriod string `json:"iptablesSyncPeriod,omitempty"`
+
+	// The address to "bind" on
+	// Defaults to 0.0.0.0
+	BindAddress string `json:"bindAddress,omitempty"`
+
+	// Any additional arguments to pass to the kubeproxy process
+	ProxyArguments map[string]ProxyArgumentList `json:"proxyArguments,omitempty"`
+}
+
+// EgressIPConfig defines the configuration knobs for EgressIP
+type EgressIPConfig struct {
+	// reachabilityTotalTimeout configures the EgressIP node reachability check total timeout in seconds.
+	// If the EgressIP node cannot be reached within this timeout, the node is declared down.
+	// Setting a large value may cause the EgressIP feature to react slowly to node changes.
+	// In particular, it may react slowly for EgressIP nodes that really have a genuine problem and are unreachable.
+	// When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+	// The current default is 1 second.
+	// A value of 0 disables the EgressIP node's reachability check.
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=60
+	// +optional
+	ReachabilityTotalTimeoutSeconds *uint32 `json:"reachabilityTotalTimeoutSeconds,omitempty"`
+}
+
+const (
+	// NetworkTypeOpenShiftSDN means the openshift-sdn plugin will be configured.
+	// DEPRECATED: OpenShift SDN is no longer supported
+	NetworkTypeOpenShiftSDN NetworkType = "OpenShiftSDN"
+
+	// NetworkTypeOVNKubernetes means the ovn-kubernetes plugin will be configured.
+	NetworkTypeOVNKubernetes NetworkType = "OVNKubernetes"
+
+	// NetworkTypeRaw
+	NetworkTypeRaw NetworkType = "Raw"
+
+	// NetworkTypeSimpleMacvlan
+	NetworkTypeSimpleMacvlan NetworkType = "SimpleMacvlan"
+)
+
+// SDNMode is the Mode the openshift-sdn plugin is in.
+// DEPRECATED: OpenShift SDN is no longer supported
+type SDNMode string
+
+const (
+	// SDNModeSubnet is a simple mode that offers no isolation between pods
+	// DEPRECATED: OpenShift SDN is no longer supported
+	SDNModeSubnet SDNMode = "Subnet"
+
+	// SDNModeMultitenant is a special "multitenant" mode that offers limited
+	// isolation configuration between namespaces
+	// DEPRECATED: OpenShift SDN is no longer supported
+	SDNModeMultitenant SDNMode = "Multitenant"
+
+	// SDNModeNetworkPolicy is a full NetworkPolicy implementation that allows
+	// for sophisticated network isolation and segmenting. This is the default.
+	// DEPRECATED: OpenShift SDN is no longer supported
+	SDNModeNetworkPolicy SDNMode = "NetworkPolicy"
+)
+
+// MacvlanMode is the Mode of macvlan. The values are lowercase to match the CNI plugin
+// config values. See "man ip-link" for details.
+type MacvlanMode string
+
+const (
+	// MacvlanModeBridge is the macvlan with thin bridge function.
+	MacvlanModeBridge MacvlanMode = "Bridge"
+	// MacvlanModePrivate
+	MacvlanModePrivate MacvlanMode = "Private"
+	// MacvlanModeVEPA is used with Virtual Ethernet Port Aggregator
+	// (802.1qbg) switch
+	MacvlanModeVEPA MacvlanMode = "VEPA"
+	// MacvlanModePassthru
+	MacvlanModePassthru MacvlanMode = "Passthru"
+)
+
+// IPAMType describes the IP address management type to configure
+type IPAMType string
+
+const (
+	// IPAMTypeDHCP uses DHCP for IP management
+	IPAMTypeDHCP IPAMType = "DHCP"
+	// IPAMTypeStatic uses static IP
+	IPAMTypeStatic IPAMType = "Static"
+)
+
+// IPsecMode enumerates the modes for IPsec configuration
+type IPsecMode string
+
+const (
+	// IPsecModeDisabled disables IPsec altogether
+	IPsecModeDisabled IPsecMode = "Disabled"
+	// IPsecModeExternal enables IPsec on the node level, but expects the user to configure it using k8s-nmstate or
+	// other means - it is most useful for secure communication from the cluster to external endpoints
+	IPsecModeExternal IPsecMode = "External"
+	// IPsecModeFull enables IPsec on the node level (the same as IPsecModeExternal), and configures it to secure communication
+	// between pods on the cluster network.
+	IPsecModeFull IPsecMode = "Full"
+)
+
+// +kubebuilder:validation:Enum:="";"Enabled";"Disabled"
+type RouteAdvertisementsEnablement string
+
+var (
+	// RouteAdvertisementsEnabled enables route advertisements for ovn-kubernetes
+	RouteAdvertisementsEnabled RouteAdvertisementsEnablement = "Enabled"
+	// RouteAdvertisementsDisabled disables route advertisements for ovn-kubernetes
+	RouteAdvertisementsDisabled RouteAdvertisementsEnablement = "Disabled"
+)
+
+// RoutingCapabilitiesProvider is a component providing routing capabilities.
+// +kubebuilder:validation:Enum=FRR
+type RoutingCapabilitiesProvider string
+
+const (
+	// RoutingCapabilitiesProviderFRR determines FRR is providing advanced
+	// routing capabilities.
+	RoutingCapabilitiesProviderFRR RoutingCapabilitiesProvider = "FRR"
+)
+
+// AdditionalRoutingCapabilities describes components and relevant configuration providing
+// advanced routing capabilities.
+type AdditionalRoutingCapabilities struct {
+	// providers is a set of enabled components that provide additional routing
+	// capabilities. Entries on this list must be unique. The only valid value
+	// is currently "FRR", which provides FRR routing capabilities through the
+	// deployment of FRR.
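+	//
+	// An illustrative (non-normative) example enabling the FRR provider:
+	//
+	//	AdditionalRoutingCapabilities{
+	//		Providers: []RoutingCapabilitiesProvider{RoutingCapabilitiesProviderFRR},
+	//	}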
+ // +listType=atomic + // +required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=1 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + Providers []RoutingCapabilitiesProvider `json:"providers"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_olm.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_olm.go new file mode 100644 index 000000000..07c94ece2 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_olm.go @@ -0,0 +1,61 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OLM provides information to configure an operator to manage the OLM controllers +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=olms,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=include.release.openshift.io/ibm-cloud-managed=false +// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1504 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=operator-lifecycle-manager,operatorOrdering=01 +// +openshift:enable:FeatureGate=NewOLM +// +openshift:capability=OperatorLifecycleManagerV1 +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="olm is a singleton, .metadata.name must be 'cluster'" +type OLM struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + //spec holds user settable values for configuration + //+kubebuilder:validation:Required + Spec OLMSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status OLMStatus `json:"status"` +} + +type OLMSpec struct { + OperatorSpec `json:",inline"` +} + +type OLMStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OLMList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OLMList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []OLM `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go new file mode 100644 index 000000000..a96e033cb --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go @@ -0,0 +1,59 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=openshiftapiservers,scope=Cluster,categories=coreoperators +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475 +// +openshift:file-pattern=cvoRunLevel=0000_30,operatorName=openshift-apiserver,operatorOrdering=01 + +// OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OpenShiftAPIServer struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the OpenShift API Server. + // +required + Spec OpenShiftAPIServerSpec `json:"spec"` + + // status defines the observed status of the OpenShift API Server. + // +optional + Status OpenShiftAPIServerStatus `json:"status"` +} + +type OpenShiftAPIServerSpec struct { + OperatorSpec `json:",inline"` +} + +type OpenShiftAPIServerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenShiftAPIServerList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OpenShiftAPIServerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []OpenShiftAPIServer `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go new file mode 100644 index 000000000..8a553a057 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go @@ -0,0 +1,56 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=openshiftcontrollermanagers,scope=Cluster,categories=coreoperators +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475 +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=openshift-controller-manager,operatorOrdering=02 + +// OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OpenShiftControllerManager struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // +required + Spec OpenShiftControllerManagerSpec `json:"spec"` + // +optional + Status OpenShiftControllerManagerStatus `json:"status"` +} + +type OpenShiftControllerManagerSpec struct { + OperatorSpec `json:",inline"` +} + +type OpenShiftControllerManagerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenShiftControllerManagerList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OpenShiftControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []OpenShiftControllerManager `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_scheduler.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_scheduler.go new file mode 100644 index 000000000..cfb04e8d9 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_scheduler.go @@ -0,0 +1,59 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=kubeschedulers,scope=Cluster,categories=coreoperators +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475 +// +openshift:file-pattern=cvoRunLevel=0000_25,operatorName=kube-scheduler,operatorOrdering=01 + +// KubeScheduler provides information to configure an operator to manage scheduler. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type KubeScheduler struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Kubernetes Scheduler + // +required + Spec KubeSchedulerSpec `json:"spec"` + + // status is the most recently observed status of the Kubernetes Scheduler + // +optional + Status KubeSchedulerStatus `json:"status"` +} + +type KubeSchedulerSpec struct { + StaticPodOperatorSpec `json:",inline"` +} + +type KubeSchedulerStatus struct { + StaticPodOperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeSchedulerList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type KubeSchedulerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []KubeScheduler `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_serviceca.go new file mode 100644 index 000000000..48534d4c6 --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_serviceca.go @@ -0,0 +1,58 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=servicecas,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475 +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=service-ca,operatorOrdering=02 + +// ServiceCA provides information to configure an operator to manage the service cert controllers +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ServiceCA struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + //spec holds user settable values for configuration + // +required + Spec ServiceCASpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ServiceCAStatus `json:"status"` +} + +type ServiceCASpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCAStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCAList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ServiceCAList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []ServiceCA `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go new file mode 100644 index 000000000..e058c065a --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go @@ -0,0 +1,53 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server +// DEPRECATED: will be removed in 4.6 +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type ServiceCatalogAPIServer struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +required + Spec ServiceCatalogAPIServerSpec `json:"spec"` + // +optional + Status ServiceCatalogAPIServerStatus `json:"status"` +} + +type ServiceCatalogAPIServerSpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCatalogAPIServerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogAPIServerList is a collection of items +// DEPRECATED: will be removed in 4.6 +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ServiceCatalogAPIServerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []ServiceCatalogAPIServer `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go new file mode 100644 index 000000000..4fe2aa46a --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go @@ -0,0 +1,53 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager +// DEPRECATED: will be removed in 4.6 +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ServiceCatalogControllerManager struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // +required + Spec ServiceCatalogControllerManagerSpec `json:"spec"` + // +optional + Status ServiceCatalogControllerManagerStatus `json:"status"` +} + +type ServiceCatalogControllerManagerSpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCatalogControllerManagerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogControllerManagerList is a collection of items +// DEPRECATED: will be removed in 4.6 +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ServiceCatalogControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []ServiceCatalogControllerManager `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_storage.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_storage.go new file mode 100644 index 000000000..69691a83a --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/types_storage.go @@ -0,0 +1,79 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=storages,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/670 +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=storage,operatorOrdering=01 + +// Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Storage struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec StorageSpec `json:"spec"` + + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status StorageStatus `json:"status"` +} + +// StorageDriverType indicates whether CSI migration should be enabled for drivers where it is optional. +// +kubebuilder:validation:Enum="";LegacyDeprecatedInTreeDriver;CSIWithMigrationDriver +type StorageDriverType string + +const ( + LegacyDeprecatedInTreeDriver StorageDriverType = "LegacyDeprecatedInTreeDriver" + CSIWithMigrationDriver StorageDriverType = "CSIWithMigrationDriver" +) + +// StorageSpec is the specification of the desired behavior of the cluster storage operator. +type StorageSpec struct { + OperatorSpec `json:",inline"` + + // vsphereStorageDriver indicates the storage driver to use on VSphere clusters. + // Once this field is set to CSIWithMigrationDriver, it can not be changed. + // If this is empty, the platform will choose a good default, + // which may change over time without notice. + // The current default is CSIWithMigrationDriver and may not be changed. + // DEPRECATED: This field will be removed in a future release. + // +kubebuilder:validation:XValidation:rule="self != \"LegacyDeprecatedInTreeDriver\"",message="VSphereStorageDriver can not be set to LegacyDeprecatedInTreeDriver" + // +optional + VSphereStorageDriver StorageDriverType `json:"vsphereStorageDriver"` +} + +// StorageStatus defines the observed status of the cluster storage operator. +type StorageStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageList contains a list of Storages. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type StorageList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + Items []Storage `json:"items"` +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..d8f3cbc2f --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -0,0 +1,5436 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by codegen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + authorizationv1 "k8s.io/api/authorization/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSCSIDriverConfigSpec) DeepCopyInto(out *AWSCSIDriverConfigSpec) { + *out = *in + if in.EFSVolumeMetrics != nil { + in, out := &in.EFSVolumeMetrics, &out.EFSVolumeMetrics + *out = new(AWSEFSVolumeMetrics) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCSIDriverConfigSpec. +func (in *AWSCSIDriverConfigSpec) DeepCopy() *AWSCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(AWSCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClassicLoadBalancerParameters) DeepCopyInto(out *AWSClassicLoadBalancerParameters) { + *out = *in + out.ConnectionIdleTimeout = in.ConnectionIdleTimeout + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = new(AWSSubnets) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClassicLoadBalancerParameters. +func (in *AWSClassicLoadBalancerParameters) DeepCopy() *AWSClassicLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSClassicLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSEFSVolumeMetrics) DeepCopyInto(out *AWSEFSVolumeMetrics) { + *out = *in + if in.RecursiveWalk != nil { + in, out := &in.RecursiveWalk, &out.RecursiveWalk + *out = new(AWSEFSVolumeMetricsRecursiveWalkConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEFSVolumeMetrics. +func (in *AWSEFSVolumeMetrics) DeepCopy() *AWSEFSVolumeMetrics { + if in == nil { + return nil + } + out := new(AWSEFSVolumeMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSEFSVolumeMetricsRecursiveWalkConfig) DeepCopyInto(out *AWSEFSVolumeMetricsRecursiveWalkConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEFSVolumeMetricsRecursiveWalkConfig. +func (in *AWSEFSVolumeMetricsRecursiveWalkConfig) DeepCopy() *AWSEFSVolumeMetricsRecursiveWalkConfig { + if in == nil { + return nil + } + out := new(AWSEFSVolumeMetricsRecursiveWalkConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSLoadBalancerParameters) DeepCopyInto(out *AWSLoadBalancerParameters) { + *out = *in + if in.ClassicLoadBalancerParameters != nil { + in, out := &in.ClassicLoadBalancerParameters, &out.ClassicLoadBalancerParameters + *out = new(AWSClassicLoadBalancerParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkLoadBalancerParameters != nil { + in, out := &in.NetworkLoadBalancerParameters, &out.NetworkLoadBalancerParameters + *out = new(AWSNetworkLoadBalancerParameters) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLoadBalancerParameters. +func (in *AWSLoadBalancerParameters) DeepCopy() *AWSLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSNetworkLoadBalancerParameters) DeepCopyInto(out *AWSNetworkLoadBalancerParameters) { + *out = *in + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = new(AWSSubnets) + (*in).DeepCopyInto(*out) + } + if in.EIPAllocations != nil { + in, out := &in.EIPAllocations, &out.EIPAllocations + *out = make([]EIPAllocation, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNetworkLoadBalancerParameters. +func (in *AWSNetworkLoadBalancerParameters) DeepCopy() *AWSNetworkLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSNetworkLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSSubnets) DeepCopyInto(out *AWSSubnets) { + *out = *in + if in.IDs != nil { + in, out := &in.IDs, &out.IDs + *out = make([]AWSSubnetID, len(*in)) + copy(*out, *in) + } + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]AWSSubnetName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSubnets. +func (in *AWSSubnets) DeepCopy() *AWSSubnets { + if in == nil { + return nil + } + out := new(AWSSubnets) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
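All of the functions in this generated file follow one recipe: shallow-copy the whole struct with *out = *in, then re-allocate every reference field so the copy shares no memory with the source; the **out = **in form appears only when the pointed-to type contains no references of its own. A hand-written illustration of the same recipe, using a Widget type invented for this sketch:

package v1example

// Widget is a hypothetical type used only to illustrate the generated pattern.
type Widget struct {
	Name   string            // value field: covered by *out = *in
	Labels map[string]string // reference field: needs an explicit rebuild
	Limit  *int32            // pointer field: needs a fresh allocation
}

// DeepCopyInto mirrors what codegen emits: shallow-copy first, then
// re-allocate every reference so the copy aliases nothing in the source.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]string, len(*in))
		for k, v := range *in {
			(*out)[k] = v
		}
	}
	if in.Limit != nil {
		in, out := &in.Limit, &out.Limit
		*out = new(int32)
		**out = **in // safe: int32 holds no pointers
	}
}

// DeepCopy allocates the target and delegates to DeepCopyInto,
// exactly as the generated wrappers above do.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}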
+func (in *AccessLogging) DeepCopyInto(out *AccessLogging) { + *out = *in + in.Destination.DeepCopyInto(&out.Destination) + in.HTTPCaptureHeaders.DeepCopyInto(&out.HTTPCaptureHeaders) + if in.HTTPCaptureCookies != nil { + in, out := &in.HTTPCaptureCookies, &out.HTTPCaptureCookies + *out = make([]IngressControllerCaptureHTTPCookie, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogging. +func (in *AccessLogging) DeepCopy() *AccessLogging { + if in == nil { + return nil + } + out := new(AccessLogging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddPage) DeepCopyInto(out *AddPage) { + *out = *in + if in.DisabledActions != nil { + in, out := &in.DisabledActions, &out.DisabledActions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddPage. +func (in *AddPage) DeepCopy() *AddPage { + if in == nil { + return nil + } + out := new(AddPage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalNetworkDefinition) DeepCopyInto(out *AdditionalNetworkDefinition) { + *out = *in + if in.SimpleMacvlanConfig != nil { + in, out := &in.SimpleMacvlanConfig, &out.SimpleMacvlanConfig + *out = new(SimpleMacvlanConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalNetworkDefinition. +func (in *AdditionalNetworkDefinition) DeepCopy() *AdditionalNetworkDefinition { + if in == nil { + return nil + } + out := new(AdditionalNetworkDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalRoutingCapabilities) DeepCopyInto(out *AdditionalRoutingCapabilities) { + *out = *in + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]RoutingCapabilitiesProvider, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalRoutingCapabilities. +func (in *AdditionalRoutingCapabilities) DeepCopy() *AdditionalRoutingCapabilities { + if in == nil { + return nil + } + out := new(AdditionalRoutingCapabilities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. +func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Authentication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authentication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. +func (in *AuthenticationList) DeepCopy() *AuthenticationList { + if in == nil { + return nil + } + out := new(AuthenticationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { + *out = *in + out.OAuthAPIServer = in.OAuthAPIServer + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. +func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { + if in == nil { + return nil + } + out := new(AuthenticationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureCSIDriverConfigSpec) DeepCopyInto(out *AzureCSIDriverConfigSpec) { + *out = *in + if in.DiskEncryptionSet != nil { + in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet + *out = new(AzureDiskEncryptionSet) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureCSIDriverConfigSpec. +func (in *AzureCSIDriverConfigSpec) DeepCopy() *AzureCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(AzureCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDiskEncryptionSet) DeepCopyInto(out *AzureDiskEncryptionSet) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskEncryptionSet. +func (in *AzureDiskEncryptionSet) DeepCopy() *AzureDiskEncryptionSet { + if in == nil { + return nil + } + out := new(AzureDiskEncryptionSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CSIDriverConfigSpec) DeepCopyInto(out *CSIDriverConfigSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzureCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudCSIDriverConfigSpec) + **out = **in + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSphereCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverConfigSpec. +func (in *CSIDriverConfigSpec) DeepCopy() *CSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(CSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotController) DeepCopyInto(out *CSISnapshotController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotController. +func (in *CSISnapshotController) DeepCopy() *CSISnapshotController { + if in == nil { + return nil + } + out := new(CSISnapshotController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSISnapshotController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerList) DeepCopyInto(out *CSISnapshotControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSISnapshotController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerList. +func (in *CSISnapshotControllerList) DeepCopy() *CSISnapshotControllerList { + if in == nil { + return nil + } + out := new(CSISnapshotControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSISnapshotControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerSpec) DeepCopyInto(out *CSISnapshotControllerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerSpec. 
+func (in *CSISnapshotControllerSpec) DeepCopy() *CSISnapshotControllerSpec { + if in == nil { + return nil + } + out := new(CSISnapshotControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerStatus) DeepCopyInto(out *CSISnapshotControllerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerStatus. +func (in *CSISnapshotControllerStatus) DeepCopy() *CSISnapshotControllerStatus { + if in == nil { + return nil + } + out := new(CSISnapshotControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Capability) DeepCopyInto(out *Capability) { + *out = *in + out.Visibility = in.Visibility + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capability. +func (in *Capability) DeepCopy() *Capability { + if in == nil { + return nil + } + out := new(Capability) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapabilityVisibility) DeepCopyInto(out *CapabilityVisibility) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapabilityVisibility. +func (in *CapabilityVisibility) DeepCopy() *CapabilityVisibility { + if in == nil { + return nil + } + out := new(CapabilityVisibility) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientTLS) DeepCopyInto(out *ClientTLS) { + *out = *in + out.ClientCA = in.ClientCA + if in.AllowedSubjectPatterns != nil { + in, out := &in.AllowedSubjectPatterns, &out.AllowedSubjectPatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientTLS. +func (in *ClientTLS) DeepCopy() *ClientTLS { + if in == nil { + return nil + } + out := new(ClientTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredential) DeepCopyInto(out *CloudCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredential. +func (in *CloudCredential) DeepCopy() *CloudCredential { + if in == nil { + return nil + } + out := new(CloudCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
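Only scheme-registered root objects and their List types additionally receive DeepCopyObject, which satisfies k8s.io/apimachinery/pkg/runtime.Object and lets one value flow through informers, work queues, and the rest of the API machinery. The rule it exists to support: never mutate an object taken from a shared informer cache, copy it first. A minimal sketch under the same clientset assumption as the earlier example:

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	operatorv1 "github.com/openshift/api/operator/v1"
	operatorclient "github.com/openshift/client-go/operator/clientset/versioned"
)

// bumpConsoleLogLevel shows copy-before-mutate: cached is treated as
// read-only shared state, and only the deep copy is changed and written back.
func bumpConsoleLogLevel(ctx context.Context, client operatorclient.Interface, cached *operatorv1.Console) error {
	updated := cached.DeepCopy()
	updated.Spec.LogLevel = operatorv1.Debug
	_, err := client.OperatorV1().Consoles().Update(ctx, updated, metav1.UpdateOptions{})
	return err
}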
+func (in *CloudCredentialList) DeepCopyInto(out *CloudCredentialList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudCredential, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialList. +func (in *CloudCredentialList) DeepCopy() *CloudCredentialList { + if in == nil { + return nil + } + out := new(CloudCredentialList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudCredentialList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredentialSpec) DeepCopyInto(out *CloudCredentialSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialSpec. +func (in *CloudCredentialSpec) DeepCopy() *CloudCredentialSpec { + if in == nil { + return nil + } + out := new(CloudCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredentialStatus) DeepCopyInto(out *CloudCredentialStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialStatus. +func (in *CloudCredentialStatus) DeepCopy() *CloudCredentialStatus { + if in == nil { + return nil + } + out := new(CloudCredentialStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriver) DeepCopyInto(out *ClusterCSIDriver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriver. +func (in *ClusterCSIDriver) DeepCopy() *ClusterCSIDriver { + if in == nil { + return nil + } + out := new(ClusterCSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCSIDriver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriverList) DeepCopyInto(out *ClusterCSIDriverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterCSIDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverList. 
+func (in *ClusterCSIDriverList) DeepCopy() *ClusterCSIDriverList { + if in == nil { + return nil + } + out := new(ClusterCSIDriverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCSIDriverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriverSpec) DeepCopyInto(out *ClusterCSIDriverSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + in.DriverConfig.DeepCopyInto(&out.DriverConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverSpec. +func (in *ClusterCSIDriverSpec) DeepCopy() *ClusterCSIDriverSpec { + if in == nil { + return nil + } + out := new(ClusterCSIDriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriverStatus) DeepCopyInto(out *ClusterCSIDriverStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverStatus. +func (in *ClusterCSIDriverStatus) DeepCopy() *ClusterCSIDriverStatus { + if in == nil { + return nil + } + out := new(ClusterCSIDriverStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigList) DeepCopyInto(out *ConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. 
+func (in *ConfigList) DeepCopy() *ConfigList { + if in == nil { + return nil + } + out := new(ConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference. +func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference { + if in == nil { + return nil + } + out := new(ConfigMapFileReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. +func (in *ConfigSpec) DeepCopy() *ConfigSpec { + if in == nil { + return nil + } + out := new(ConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. +func (in *ConfigStatus) DeepCopy() *ConfigStatus { + if in == nil { + return nil + } + out := new(ConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Console) DeepCopyInto(out *Console) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. +func (in *Console) DeepCopy() *Console { + if in == nil { + return nil + } + out := new(Console) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Console) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleConfigRoute) DeepCopyInto(out *ConsoleConfigRoute) { + *out = *in + out.Secret = in.Secret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleConfigRoute. +func (in *ConsoleConfigRoute) DeepCopy() *ConsoleConfigRoute { + if in == nil { + return nil + } + out := new(ConsoleConfigRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleCustomization) DeepCopyInto(out *ConsoleCustomization) { + *out = *in + if in.Logos != nil { + in, out := &in.Logos, &out.Logos + *out = make([]Logo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + out.CustomLogoFile = in.CustomLogoFile + in.DeveloperCatalog.DeepCopyInto(&out.DeveloperCatalog) + in.ProjectAccess.DeepCopyInto(&out.ProjectAccess) + in.QuickStarts.DeepCopyInto(&out.QuickStarts) + in.AddPage.DeepCopyInto(&out.AddPage) + if in.Perspectives != nil { + in, out := &in.Perspectives, &out.Perspectives + *out = make([]Perspective, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleCustomization. +func (in *ConsoleCustomization) DeepCopy() *ConsoleCustomization { + if in == nil { + return nil + } + out := new(ConsoleCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Console, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. +func (in *ConsoleList) DeepCopy() *ConsoleList { + if in == nil { + return nil + } + out := new(ConsoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleProviders) DeepCopyInto(out *ConsoleProviders) { + *out = *in + if in.Statuspage != nil { + in, out := &in.Statuspage, &out.Statuspage + *out = new(StatuspageProvider) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleProviders. +func (in *ConsoleProviders) DeepCopy() *ConsoleProviders { + if in == nil { + return nil + } + out := new(ConsoleProviders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + in.Customization.DeepCopyInto(&out.Customization) + in.Providers.DeepCopyInto(&out.Providers) + out.Route = in.Route + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Ingress = in.Ingress + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. +func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { + if in == nil { + return nil + } + out := new(ConsoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. +func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { + if in == nil { + return nil + } + out := new(ConsoleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerLoggingDestinationParameters) DeepCopyInto(out *ContainerLoggingDestinationParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerLoggingDestinationParameters. +func (in *ContainerLoggingDestinationParameters) DeepCopy() *ContainerLoggingDestinationParameters { + if in == nil { + return nil + } + out := new(ContainerLoggingDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSCache) DeepCopyInto(out *DNSCache) { + *out = *in + out.PositiveTTL = in.PositiveTTL + out.NegativeTTL = in.NegativeTTL + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSCache. +func (in *DNSCache) DeepCopy() *DNSCache { + if in == nil { + return nil + } + out := new(DNSCache) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSList) DeepCopyInto(out *DNSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList. +func (in *DNSList) DeepCopy() *DNSList { + if in == nil { + return nil + } + out := new(DNSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSNodePlacement) DeepCopyInto(out *DNSNodePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNodePlacement. +func (in *DNSNodePlacement) DeepCopy() *DNSNodePlacement { + if in == nil { + return nil + } + out := new(DNSNodePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSOverTLSConfig) DeepCopyInto(out *DNSOverTLSConfig) { + *out = *in + out.CABundle = in.CABundle + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOverTLSConfig. +func (in *DNSOverTLSConfig) DeepCopy() *DNSOverTLSConfig { + if in == nil { + return nil + } + out := new(DNSOverTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { + *out = *in + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]Server, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpstreamResolvers.DeepCopyInto(&out.UpstreamResolvers) + in.NodePlacement.DeepCopyInto(&out.NodePlacement) + out.Cache = in.Cache + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. +func (in *DNSSpec) DeepCopy() *DNSSpec { + if in == nil { + return nil + } + out := new(DNSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSStatus) DeepCopyInto(out *DNSStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus. +func (in *DNSStatus) DeepCopy() *DNSStatus { + if in == nil { + return nil + } + out := new(DNSStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSTransportConfig) DeepCopyInto(out *DNSTransportConfig) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(DNSOverTLSConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSTransportConfig. +func (in *DNSTransportConfig) DeepCopy() *DNSTransportConfig { + if in == nil { + return nil + } + out := new(DNSTransportConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
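DNSNodePlacement above shows the two slice idioms side by side: element types without references ([]string and the typed string aliases) are duplicated with a single builtin copy, while element types that own references (corev1.Toleration here, the Items of every List type elsewhere) are copied element by element through their own DeepCopyInto. The distinction in miniature, with Item invented for this sketch:

// copyStrings is safe with the builtin copy: strings are immutable values.
func copyStrings(in []string) []string {
	if in == nil {
		return nil
	}
	out := make([]string, len(in))
	copy(out, in)
	return out
}

// Item stands in for an element type that owns references, where copy()
// alone would leave the inner slice aliased between source and destination.
type Item struct{ Tags []string }

func (in *Item) DeepCopyInto(out *Item) {
	*out = *in
	if in.Tags != nil {
		out.Tags = make([]string, len(in.Tags))
		copy(out.Tags, in.Tags)
	}
}

// copyItems copies element by element, matching the generated loop form.
func copyItems(in []Item) []Item {
	if in == nil {
		return nil
	}
	out := make([]Item, len(in))
	for i := range in {
		in[i].DeepCopyInto(&out[i])
	}
	return out
}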
+func (in *DefaultNetworkDefinition) DeepCopyInto(out *DefaultNetworkDefinition) { + *out = *in + if in.OpenShiftSDNConfig != nil { + in, out := &in.OpenShiftSDNConfig, &out.OpenShiftSDNConfig + *out = new(OpenShiftSDNConfig) + (*in).DeepCopyInto(*out) + } + if in.OVNKubernetesConfig != nil { + in, out := &in.OVNKubernetesConfig, &out.OVNKubernetesConfig + *out = new(OVNKubernetesConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultNetworkDefinition. +func (in *DefaultNetworkDefinition) DeepCopy() *DefaultNetworkDefinition { + if in == nil { + return nil + } + out := new(DefaultNetworkDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeveloperConsoleCatalogCategory) DeepCopyInto(out *DeveloperConsoleCatalogCategory) { + *out = *in + in.DeveloperConsoleCatalogCategoryMeta.DeepCopyInto(&out.DeveloperConsoleCatalogCategoryMeta) + if in.Subcategories != nil { + in, out := &in.Subcategories, &out.Subcategories + *out = make([]DeveloperConsoleCatalogCategoryMeta, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategory. +func (in *DeveloperConsoleCatalogCategory) DeepCopy() *DeveloperConsoleCatalogCategory { + if in == nil { + return nil + } + out := new(DeveloperConsoleCatalogCategory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopyInto(out *DeveloperConsoleCatalogCategoryMeta) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategoryMeta. +func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopy() *DeveloperConsoleCatalogCategoryMeta { + if in == nil { + return nil + } + out := new(DeveloperConsoleCatalogCategoryMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeveloperConsoleCatalogCustomization) DeepCopyInto(out *DeveloperConsoleCatalogCustomization) { + *out = *in + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]DeveloperConsoleCatalogCategory, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Types.DeepCopyInto(&out.Types) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCustomization. +func (in *DeveloperConsoleCatalogCustomization) DeepCopy() *DeveloperConsoleCatalogCustomization { + if in == nil { + return nil + } + out := new(DeveloperConsoleCatalogCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeveloperConsoleCatalogTypes) DeepCopyInto(out *DeveloperConsoleCatalogTypes) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogTypes. +func (in *DeveloperConsoleCatalogTypes) DeepCopy() *DeveloperConsoleCatalogTypes { + if in == nil { + return nil + } + out := new(DeveloperConsoleCatalogTypes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressIPConfig) DeepCopyInto(out *EgressIPConfig) { + *out = *in + if in.ReachabilityTotalTimeoutSeconds != nil { + in, out := &in.ReachabilityTotalTimeoutSeconds, &out.ReachabilityTotalTimeoutSeconds + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressIPConfig. +func (in *EgressIPConfig) DeepCopy() *EgressIPConfig { + if in == nil { + return nil + } + out := new(EgressIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPublishingStrategy) DeepCopyInto(out *EndpointPublishingStrategy) { + *out = *in + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerStrategy) + (*in).DeepCopyInto(*out) + } + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(HostNetworkStrategy) + **out = **in + } + if in.Private != nil { + in, out := &in.Private, &out.Private + *out = new(PrivateStrategy) + **out = **in + } + if in.NodePort != nil { + in, out := &in.NodePort, &out.NodePort + *out = new(NodePortStrategy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPublishingStrategy. +func (in *EndpointPublishingStrategy) DeepCopy() *EndpointPublishingStrategy { + if in == nil { + return nil + } + out := new(EndpointPublishingStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Etcd) DeepCopyInto(out *Etcd) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd. +func (in *Etcd) DeepCopy() *Etcd { + if in == nil { + return nil + } + out := new(Etcd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Etcd) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
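DeveloperConsoleCatalogTypes above is the one place in this file that copies a pointer to a slice (*[]string). The generated code preserves all three states, a nil pointer, a pointer to a nil slice, and a pointer to a populated slice, because the API gives them distinct meanings (unset, explicitly empty, and populated). The same handling as a standalone helper:

// copyStringSlicePtr reproduces the generated handling of *[]string fields:
// nil pointer, pointer-to-nil-slice, and populated slice all round-trip
// unchanged.
func copyStringSlicePtr(in *[]string) *[]string {
	if in == nil {
		return nil
	}
	out := new([]string) // pointer to a nil slice until proven otherwise
	if *in != nil {
		*out = make([]string, len(*in)) // non-nil even when empty
		copy(*out, *in)
	}
	return out
}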
+func (in *EtcdList) DeepCopyInto(out *EtcdList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Etcd, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdList. +func (in *EtcdList) DeepCopy() *EtcdList { + if in == nil { + return nil + } + out := new(EtcdList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EtcdList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec. +func (in *EtcdSpec) DeepCopy() *EtcdSpec { + if in == nil { + return nil + } + out := new(EtcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdStatus) DeepCopyInto(out *EtcdStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStatus. +func (in *EtcdStatus) DeepCopy() *EtcdStatus { + if in == nil { + return nil + } + out := new(EtcdStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportNetworkFlows) DeepCopyInto(out *ExportNetworkFlows) { + *out = *in + if in.NetFlow != nil { + in, out := &in.NetFlow, &out.NetFlow + *out = new(NetFlowConfig) + (*in).DeepCopyInto(*out) + } + if in.SFlow != nil { + in, out := &in.SFlow, &out.SFlow + *out = new(SFlowConfig) + (*in).DeepCopyInto(*out) + } + if in.IPFIX != nil { + in, out := &in.IPFIX, &out.IPFIX + *out = new(IPFIXConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportNetworkFlows. +func (in *ExportNetworkFlows) DeepCopy() *ExportNetworkFlows { + if in == nil { + return nil + } + out := new(ExportNetworkFlows) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeaturesMigration) DeepCopyInto(out *FeaturesMigration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeaturesMigration. +func (in *FeaturesMigration) DeepCopy() *FeaturesMigration { + if in == nil { + return nil + } + out := new(FeaturesMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileReferenceSource) DeepCopyInto(out *FileReferenceSource) { + *out = *in + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapFileReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileReferenceSource. +func (in *FileReferenceSource) DeepCopy() *FileReferenceSource { + if in == nil { + return nil + } + out := new(FileReferenceSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardPlugin) DeepCopyInto(out *ForwardPlugin) { + *out = *in + if in.Upstreams != nil { + in, out := &in.Upstreams, &out.Upstreams + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.TransportConfig.DeepCopyInto(&out.TransportConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardPlugin. +func (in *ForwardPlugin) DeepCopy() *ForwardPlugin { + if in == nil { + return nil + } + out := new(ForwardPlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPCSIDriverConfigSpec) DeepCopyInto(out *GCPCSIDriverConfigSpec) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(GCPKMSKeyReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPCSIDriverConfigSpec. +func (in *GCPCSIDriverConfigSpec) DeepCopy() *GCPCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(GCPCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPKMSKeyReference) DeepCopyInto(out *GCPKMSKeyReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPKMSKeyReference. +func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference { + if in == nil { + return nil + } + out := new(GCPKMSKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPLoadBalancerParameters) DeepCopyInto(out *GCPLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPLoadBalancerParameters. +func (in *GCPLoadBalancerParameters) DeepCopy() *GCPLoadBalancerParameters { + if in == nil { + return nil + } + out := new(GCPLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayConfig) DeepCopyInto(out *GatewayConfig) { + *out = *in + out.IPv4 = in.IPv4 + out.IPv6 = in.IPv6 + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayConfig. +func (in *GatewayConfig) DeepCopy() *GatewayConfig { + if in == nil { + return nil + } + out := new(GatewayConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatherStatus) DeepCopyInto(out *GatherStatus) { + *out = *in + in.LastGatherTime.DeepCopyInto(&out.LastGatherTime) + out.LastGatherDuration = in.LastGatherDuration + if in.Gatherers != nil { + in, out := &in.Gatherers, &out.Gatherers + *out = make([]GathererStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatherStatus. +func (in *GatherStatus) DeepCopy() *GatherStatus { + if in == nil { + return nil + } + out := new(GatherStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GathererStatus) DeepCopyInto(out *GathererStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.LastGatherDuration = in.LastGatherDuration + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GathererStatus. +func (in *GathererStatus) DeepCopy() *GathererStatus { + if in == nil { + return nil + } + out := new(GathererStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenerationStatus) DeepCopyInto(out *GenerationStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationStatus. +func (in *GenerationStatus) DeepCopy() *GenerationStatus { + if in == nil { + return nil + } + out := new(GenerationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPCompressionPolicy) DeepCopyInto(out *HTTPCompressionPolicy) { + *out = *in + if in.MimeTypes != nil { + in, out := &in.MimeTypes, &out.MimeTypes + *out = make([]CompressionMIMEType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCompressionPolicy. +func (in *HTTPCompressionPolicy) DeepCopy() *HTTPCompressionPolicy { + if in == nil { + return nil + } + out := new(HTTPCompressionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthCheck) DeepCopyInto(out *HealthCheck) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheck. +func (in *HealthCheck) DeepCopy() *HealthCheck { + if in == nil { + return nil + } + out := new(HealthCheck) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostNetworkStrategy) DeepCopyInto(out *HostNetworkStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNetworkStrategy. +func (in *HostNetworkStrategy) DeepCopy() *HostNetworkStrategy { + if in == nil { + return nil + } + out := new(HostNetworkStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HybridOverlayConfig) DeepCopyInto(out *HybridOverlayConfig) { + *out = *in + if in.HybridClusterNetwork != nil { + in, out := &in.HybridClusterNetwork, &out.HybridClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.HybridOverlayVXLANPort != nil { + in, out := &in.HybridOverlayVXLANPort, &out.HybridOverlayVXLANPort + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HybridOverlayConfig. +func (in *HybridOverlayConfig) DeepCopy() *HybridOverlayConfig { + if in == nil { + return nil + } + out := new(HybridOverlayConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudCSIDriverConfigSpec) DeepCopyInto(out *IBMCloudCSIDriverConfigSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudCSIDriverConfigSpec. +func (in *IBMCloudCSIDriverConfigSpec) DeepCopy() *IBMCloudCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(IBMCloudCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMLoadBalancerParameters) DeepCopyInto(out *IBMLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMLoadBalancerParameters. +func (in *IBMLoadBalancerParameters) DeepCopy() *IBMLoadBalancerParameters { + if in == nil { + return nil + } + out := new(IBMLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) { + *out = *in + if in.StaticIPAMConfig != nil { + in, out := &in.StaticIPAMConfig, &out.StaticIPAMConfig + *out = new(StaticIPAMConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig. +func (in *IPAMConfig) DeepCopy() *IPAMConfig { + if in == nil { + return nil + } + out := new(IPAMConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPFIXConfig) DeepCopyInto(out *IPFIXConfig) { + *out = *in + if in.Collectors != nil { + in, out := &in.Collectors, &out.Collectors + *out = make([]IPPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPFIXConfig. +func (in *IPFIXConfig) DeepCopy() *IPFIXConfig { + if in == nil { + return nil + } + out := new(IPFIXConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPsecConfig) DeepCopyInto(out *IPsecConfig) { + *out = *in + if in.Full != nil { + in, out := &in.Full, &out.Full + *out = new(IPsecFullModeConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecConfig. 
+func (in *IPsecConfig) DeepCopy() *IPsecConfig { + if in == nil { + return nil + } + out := new(IPsecConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPsecFullModeConfig) DeepCopyInto(out *IPsecFullModeConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecFullModeConfig. +func (in *IPsecFullModeConfig) DeepCopy() *IPsecFullModeConfig { + if in == nil { + return nil + } + out := new(IPsecFullModeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv4GatewayConfig) DeepCopyInto(out *IPv4GatewayConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4GatewayConfig. +func (in *IPv4GatewayConfig) DeepCopy() *IPv4GatewayConfig { + if in == nil { + return nil + } + out := new(IPv4GatewayConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv4OVNKubernetesConfig) DeepCopyInto(out *IPv4OVNKubernetesConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4OVNKubernetesConfig. +func (in *IPv4OVNKubernetesConfig) DeepCopy() *IPv4OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(IPv4OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv6GatewayConfig) DeepCopyInto(out *IPv6GatewayConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6GatewayConfig. +func (in *IPv6GatewayConfig) DeepCopy() *IPv6GatewayConfig { + if in == nil { + return nil + } + out := new(IPv6GatewayConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv6OVNKubernetesConfig) DeepCopyInto(out *IPv6OVNKubernetesConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6OVNKubernetesConfig. +func (in *IPv6OVNKubernetesConfig) DeepCopy() *IPv6OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(IPv6OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressController) DeepCopyInto(out *IngressController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController. +func (in *IngressController) DeepCopy() *IngressController { + if in == nil { + return nil + } + out := new(IngressController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerCaptureHTTPCookie) DeepCopyInto(out *IngressControllerCaptureHTTPCookie) { + *out = *in + out.IngressControllerCaptureHTTPCookieUnion = in.IngressControllerCaptureHTTPCookieUnion + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookie. +func (in *IngressControllerCaptureHTTPCookie) DeepCopy() *IngressControllerCaptureHTTPCookie { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPCookie) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopyInto(out *IngressControllerCaptureHTTPCookieUnion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookieUnion. +func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopy() *IngressControllerCaptureHTTPCookieUnion { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPCookieUnion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerCaptureHTTPHeader) DeepCopyInto(out *IngressControllerCaptureHTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeader. +func (in *IngressControllerCaptureHTTPHeader) DeepCopy() *IngressControllerCaptureHTTPHeader { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerCaptureHTTPHeaders) DeepCopyInto(out *IngressControllerCaptureHTTPHeaders) { + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]IngressControllerCaptureHTTPHeader, len(*in)) + copy(*out, *in) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = make([]IngressControllerCaptureHTTPHeader, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeaders. 
+func (in *IngressControllerCaptureHTTPHeaders) DeepCopy() *IngressControllerCaptureHTTPHeaders { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPHeaders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerHTTPHeader) DeepCopyInto(out *IngressControllerHTTPHeader) { + *out = *in + in.Action.DeepCopyInto(&out.Action) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeader. +func (in *IngressControllerHTTPHeader) DeepCopy() *IngressControllerHTTPHeader { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerHTTPHeaderActionUnion) DeepCopyInto(out *IngressControllerHTTPHeaderActionUnion) { + *out = *in + if in.Set != nil { + in, out := &in.Set, &out.Set + *out = new(IngressControllerSetHTTPHeader) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaderActionUnion. +func (in *IngressControllerHTTPHeaderActionUnion) DeepCopy() *IngressControllerHTTPHeaderActionUnion { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeaderActionUnion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerHTTPHeaderActions) DeepCopyInto(out *IngressControllerHTTPHeaderActions) { + *out = *in + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = make([]IngressControllerHTTPHeader, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]IngressControllerHTTPHeader, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaderActions. +func (in *IngressControllerHTTPHeaderActions) DeepCopy() *IngressControllerHTTPHeaderActions { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeaderActions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerHTTPHeaders) DeepCopyInto(out *IngressControllerHTTPHeaders) { + *out = *in + out.UniqueId = in.UniqueId + if in.HeaderNameCaseAdjustments != nil { + in, out := &in.HeaderNameCaseAdjustments, &out.HeaderNameCaseAdjustments + *out = make([]IngressControllerHTTPHeaderNameCaseAdjustment, len(*in)) + copy(*out, *in) + } + in.Actions.DeepCopyInto(&out.Actions) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaders. +func (in *IngressControllerHTTPHeaders) DeepCopy() *IngressControllerHTTPHeaders { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeaders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopyInto(out *IngressControllerHTTPUniqueIdHeaderPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPUniqueIdHeaderPolicy. +func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopy() *IngressControllerHTTPUniqueIdHeaderPolicy { + if in == nil { + return nil + } + out := new(IngressControllerHTTPUniqueIdHeaderPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerList) DeepCopyInto(out *IngressControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IngressController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerList. +func (in *IngressControllerList) DeepCopy() *IngressControllerList { + if in == nil { + return nil + } + out := new(IngressControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerLogging) DeepCopyInto(out *IngressControllerLogging) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = new(AccessLogging) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerLogging. +func (in *IngressControllerLogging) DeepCopy() *IngressControllerLogging { + if in == nil { + return nil + } + out := new(IngressControllerLogging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerSetHTTPHeader) DeepCopyInto(out *IngressControllerSetHTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerSetHTTPHeader. +func (in *IngressControllerSetHTTPHeader) DeepCopy() *IngressControllerSetHTTPHeader { + if in == nil { + return nil + } + out := new(IngressControllerSetHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressControllerSpec) DeepCopyInto(out *IngressControllerSpec) { + *out = *in + out.HttpErrorCodePages = in.HttpErrorCodePages + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.EndpointPublishingStrategy != nil { + in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy + *out = new(EndpointPublishingStrategy) + (*in).DeepCopyInto(*out) + } + if in.DefaultCertificate != nil { + in, out := &in.DefaultCertificate, &out.DefaultCertificate + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RouteSelector != nil { + in, out := &in.RouteSelector, &out.RouteSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NodePlacement != nil { + in, out := &in.NodePlacement, &out.NodePlacement + *out = new(NodePlacement) + (*in).DeepCopyInto(*out) + } + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(configv1.TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } + in.ClientTLS.DeepCopyInto(&out.ClientTLS) + if in.RouteAdmission != nil { + in, out := &in.RouteAdmission, &out.RouteAdmission + *out = new(RouteAdmissionPolicy) + **out = **in + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(IngressControllerLogging) + (*in).DeepCopyInto(*out) + } + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = new(IngressControllerHTTPHeaders) + (*in).DeepCopyInto(*out) + } + in.TuningOptions.DeepCopyInto(&out.TuningOptions) + in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) + in.HTTPCompression.DeepCopyInto(&out.HTTPCompression) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerSpec. +func (in *IngressControllerSpec) DeepCopy() *IngressControllerSpec { + if in == nil { + return nil + } + out := new(IngressControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerStatus) DeepCopyInto(out *IngressControllerStatus) { + *out = *in + if in.EndpointPublishingStrategy != nil { + in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy + *out = new(EndpointPublishingStrategy) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSProfile != nil { + in, out := &in.TLSProfile, &out.TLSProfile + *out = new(configv1.TLSProfileSpec) + (*in).DeepCopyInto(*out) + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RouteSelector != nil { + in, out := &in.RouteSelector, &out.RouteSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerStatus. 
+func (in *IngressControllerStatus) DeepCopy() *IngressControllerStatus { + if in == nil { + return nil + } + out := new(IngressControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerTuningOptions) DeepCopyInto(out *IngressControllerTuningOptions) { + *out = *in + if in.ClientTimeout != nil { + in, out := &in.ClientTimeout, &out.ClientTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.ClientFinTimeout != nil { + in, out := &in.ClientFinTimeout, &out.ClientFinTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.ServerTimeout != nil { + in, out := &in.ServerTimeout, &out.ServerTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.ServerFinTimeout != nil { + in, out := &in.ServerFinTimeout, &out.ServerFinTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.TunnelTimeout != nil { + in, out := &in.TunnelTimeout, &out.TunnelTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.ConnectTimeout != nil { + in, out := &in.ConnectTimeout, &out.ConnectTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.TLSInspectDelay != nil { + in, out := &in.TLSInspectDelay, &out.TLSInspectDelay + *out = new(metav1.Duration) + **out = **in + } + if in.HealthCheckInterval != nil { + in, out := &in.HealthCheckInterval, &out.HealthCheckInterval + *out = new(metav1.Duration) + **out = **in + } + out.ReloadInterval = in.ReloadInterval + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerTuningOptions. +func (in *IngressControllerTuningOptions) DeepCopy() *IngressControllerTuningOptions { + if in == nil { + return nil + } + out := new(IngressControllerTuningOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsOperator) DeepCopyInto(out *InsightsOperator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperator. +func (in *InsightsOperator) DeepCopy() *InsightsOperator { + if in == nil { + return nil + } + out := new(InsightsOperator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsOperator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsOperatorList) DeepCopyInto(out *InsightsOperatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InsightsOperator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperatorList. 
+func (in *InsightsOperatorList) DeepCopy() *InsightsOperatorList { + if in == nil { + return nil + } + out := new(InsightsOperatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsOperatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsOperatorSpec) DeepCopyInto(out *InsightsOperatorSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperatorSpec. +func (in *InsightsOperatorSpec) DeepCopy() *InsightsOperatorSpec { + if in == nil { + return nil + } + out := new(InsightsOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsOperatorStatus) DeepCopyInto(out *InsightsOperatorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + in.GatherStatus.DeepCopyInto(&out.GatherStatus) + in.InsightsReport.DeepCopyInto(&out.InsightsReport) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperatorStatus. +func (in *InsightsOperatorStatus) DeepCopy() *InsightsOperatorStatus { + if in == nil { + return nil + } + out := new(InsightsOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsReport) DeepCopyInto(out *InsightsReport) { + *out = *in + in.DownloadedAt.DeepCopyInto(&out.DownloadedAt) + if in.HealthChecks != nil { + in, out := &in.HealthChecks, &out.HealthChecks + *out = make([]HealthCheck, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsReport. +func (in *InsightsReport) DeepCopy() *InsightsReport { + if in == nil { + return nil + } + out := new(InsightsReport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServer) DeepCopyInto(out *KubeAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServer. +func (in *KubeAPIServer) DeepCopy() *KubeAPIServer { + if in == nil { + return nil + } + out := new(KubeAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeAPIServerList) DeepCopyInto(out *KubeAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerList. +func (in *KubeAPIServerList) DeepCopy() *KubeAPIServerList { + if in == nil { + return nil + } + out := new(KubeAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerSpec) DeepCopyInto(out *KubeAPIServerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerSpec. +func (in *KubeAPIServerSpec) DeepCopy() *KubeAPIServerSpec { + if in == nil { + return nil + } + out := new(KubeAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerStatus) DeepCopyInto(out *KubeAPIServerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + if in.ServiceAccountIssuers != nil { + in, out := &in.ServiceAccountIssuers, &out.ServiceAccountIssuers + *out = make([]ServiceAccountIssuerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerStatus. +func (in *KubeAPIServerStatus) DeepCopy() *KubeAPIServerStatus { + if in == nil { + return nil + } + out := new(KubeAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManager) DeepCopyInto(out *KubeControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManager. +func (in *KubeControllerManager) DeepCopy() *KubeControllerManager { + if in == nil { + return nil + } + out := new(KubeControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeControllerManagerList) DeepCopyInto(out *KubeControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerList. +func (in *KubeControllerManagerList) DeepCopy() *KubeControllerManagerList { + if in == nil { + return nil + } + out := new(KubeControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerSpec) DeepCopyInto(out *KubeControllerManagerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerSpec. +func (in *KubeControllerManagerSpec) DeepCopy() *KubeControllerManagerSpec { + if in == nil { + return nil + } + out := new(KubeControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerStatus) DeepCopyInto(out *KubeControllerManagerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerStatus. +func (in *KubeControllerManagerStatus) DeepCopy() *KubeControllerManagerStatus { + if in == nil { + return nil + } + out := new(KubeControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeScheduler) DeepCopyInto(out *KubeScheduler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeScheduler. +func (in *KubeScheduler) DeepCopy() *KubeScheduler { + if in == nil { + return nil + } + out := new(KubeScheduler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeScheduler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerList) DeepCopyInto(out *KubeSchedulerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeScheduler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerList. 
+func (in *KubeSchedulerList) DeepCopy() *KubeSchedulerList { + if in == nil { + return nil + } + out := new(KubeSchedulerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeSchedulerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerSpec) DeepCopyInto(out *KubeSchedulerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerSpec. +func (in *KubeSchedulerSpec) DeepCopy() *KubeSchedulerSpec { + if in == nil { + return nil + } + out := new(KubeSchedulerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerStatus) DeepCopyInto(out *KubeSchedulerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerStatus. +func (in *KubeSchedulerStatus) DeepCopy() *KubeSchedulerStatus { + if in == nil { + return nil + } + out := new(KubeSchedulerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigrator) DeepCopyInto(out *KubeStorageVersionMigrator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigrator. +func (in *KubeStorageVersionMigrator) DeepCopy() *KubeStorageVersionMigrator { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigrator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeStorageVersionMigrator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigratorList) DeepCopyInto(out *KubeStorageVersionMigratorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeStorageVersionMigrator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorList. +func (in *KubeStorageVersionMigratorList) DeepCopy() *KubeStorageVersionMigratorList { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *KubeStorageVersionMigratorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigratorSpec) DeepCopyInto(out *KubeStorageVersionMigratorSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorSpec. +func (in *KubeStorageVersionMigratorSpec) DeepCopy() *KubeStorageVersionMigratorSpec { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigratorStatus) DeepCopyInto(out *KubeStorageVersionMigratorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorStatus. +func (in *KubeStorageVersionMigratorStatus) DeepCopy() *KubeStorageVersionMigratorStatus { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerStrategy) DeepCopyInto(out *LoadBalancerStrategy) { + *out = *in + if in.AllowedSourceRanges != nil { + in, out := &in.AllowedSourceRanges, &out.AllowedSourceRanges + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + if in.ProviderParameters != nil { + in, out := &in.ProviderParameters, &out.ProviderParameters + *out = new(ProviderLoadBalancerParameters) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStrategy. +func (in *LoadBalancerStrategy) DeepCopy() *LoadBalancerStrategy { + if in == nil { + return nil + } + out := new(LoadBalancerStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingDestination) DeepCopyInto(out *LoggingDestination) { + *out = *in + if in.Syslog != nil { + in, out := &in.Syslog, &out.Syslog + *out = new(SyslogLoggingDestinationParameters) + **out = **in + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(ContainerLoggingDestinationParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingDestination. +func (in *LoggingDestination) DeepCopy() *LoggingDestination { + if in == nil { + return nil + } + out := new(LoggingDestination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Logo) DeepCopyInto(out *Logo) { + *out = *in + if in.Themes != nil { + in, out := &in.Themes, &out.Themes + *out = make([]Theme, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logo. 
+func (in *Logo) DeepCopy() *Logo { + if in == nil { + return nil + } + out := new(Logo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTUMigration) DeepCopyInto(out *MTUMigration) { + *out = *in + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + if in.Machine != nil { + in, out := &in.Machine, &out.Machine + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigration. +func (in *MTUMigration) DeepCopy() *MTUMigration { + if in == nil { + return nil + } + out := new(MTUMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTUMigrationValues) DeepCopyInto(out *MTUMigrationValues) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(uint32) + **out = **in + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigrationValues. +func (in *MTUMigrationValues) DeepCopy() *MTUMigrationValues { + if in == nil { + return nil + } + out := new(MTUMigrationValues) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfiguration) DeepCopyInto(out *MachineConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfiguration. +func (in *MachineConfiguration) DeepCopy() *MachineConfiguration { + if in == nil { + return nil + } + out := new(MachineConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigurationList) DeepCopyInto(out *MachineConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigurationList. +func (in *MachineConfigurationList) DeepCopy() *MachineConfigurationList { + if in == nil { + return nil + } + out := new(MachineConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *MachineConfigurationSpec) DeepCopyInto(out *MachineConfigurationSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + in.ManagedBootImages.DeepCopyInto(&out.ManagedBootImages) + in.NodeDisruptionPolicy.DeepCopyInto(&out.NodeDisruptionPolicy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigurationSpec. +func (in *MachineConfigurationSpec) DeepCopy() *MachineConfigurationSpec { + if in == nil { + return nil + } + out := new(MachineConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigurationStatus) DeepCopyInto(out *MachineConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.NodeDisruptionPolicyStatus.DeepCopyInto(&out.NodeDisruptionPolicyStatus) + in.ManagedBootImagesStatus.DeepCopyInto(&out.ManagedBootImagesStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigurationStatus. +func (in *MachineConfigurationStatus) DeepCopy() *MachineConfigurationStatus { + if in == nil { + return nil + } + out := new(MachineConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineManager) DeepCopyInto(out *MachineManager) { + *out = *in + in.Selection.DeepCopyInto(&out.Selection) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineManager. +func (in *MachineManager) DeepCopy() *MachineManager { + if in == nil { + return nil + } + out := new(MachineManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineManagerSelector) DeepCopyInto(out *MachineManagerSelector) { + *out = *in + if in.Partial != nil { + in, out := &in.Partial, &out.Partial + *out = new(PartialSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineManagerSelector. +func (in *MachineManagerSelector) DeepCopy() *MachineManagerSelector { + if in == nil { + return nil + } + out := new(MachineManagerSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedBootImages) DeepCopyInto(out *ManagedBootImages) { + *out = *in + if in.MachineManagers != nil { + in, out := &in.MachineManagers, &out.MachineManagers + *out = make([]MachineManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedBootImages. +func (in *ManagedBootImages) DeepCopy() *ManagedBootImages { + if in == nil { + return nil + } + out := new(ManagedBootImages) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MyOperatorResource) DeepCopyInto(out *MyOperatorResource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResource. +func (in *MyOperatorResource) DeepCopy() *MyOperatorResource { + if in == nil { + return nil + } + out := new(MyOperatorResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MyOperatorResourceSpec) DeepCopyInto(out *MyOperatorResourceSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceSpec. +func (in *MyOperatorResourceSpec) DeepCopy() *MyOperatorResourceSpec { + if in == nil { + return nil + } + out := new(MyOperatorResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MyOperatorResourceStatus) DeepCopyInto(out *MyOperatorResourceStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceStatus. +func (in *MyOperatorResourceStatus) DeepCopy() *MyOperatorResourceStatus { + if in == nil { + return nil + } + out := new(MyOperatorResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetFlowConfig) DeepCopyInto(out *NetFlowConfig) { + *out = *in + if in.Collectors != nil { + in, out := &in.Collectors, &out.Collectors + *out = make([]IPPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetFlowConfig. +func (in *NetFlowConfig) DeepCopy() *NetFlowConfig { + if in == nil { + return nil + } + out := new(NetFlowConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. +func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) { + *out = *in + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(MTUMigration) + (*in).DeepCopyInto(*out) + } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = new(FeaturesMigration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration. +func (in *NetworkMigration) DeepCopy() *NetworkMigration { + if in == nil { + return nil + } + out := new(NetworkMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DefaultNetwork.DeepCopyInto(&out.DefaultNetwork) + if in.AdditionalNetworks != nil { + in, out := &in.AdditionalNetworks, &out.AdditionalNetworks + *out = make([]AdditionalNetworkDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisableMultiNetwork != nil { + in, out := &in.DisableMultiNetwork, &out.DisableMultiNetwork + *out = new(bool) + **out = **in + } + if in.UseMultiNetworkPolicy != nil { + in, out := &in.UseMultiNetworkPolicy, &out.UseMultiNetworkPolicy + *out = new(bool) + **out = **in + } + if in.DeployKubeProxy != nil { + in, out := &in.DeployKubeProxy, &out.DeployKubeProxy + *out = new(bool) + **out = **in + } + if in.KubeProxyConfig != nil { + in, out := &in.KubeProxyConfig, &out.KubeProxyConfig + *out = new(ProxyConfig) + (*in).DeepCopyInto(*out) + } + if in.ExportNetworkFlows != nil { + in, out := &in.ExportNetworkFlows, &out.ExportNetworkFlows + *out = new(ExportNetworkFlows) + (*in).DeepCopyInto(*out) + } + if in.Migration != nil { + in, out := &in.Migration, &out.Migration + *out = new(NetworkMigration) + (*in).DeepCopyInto(*out) + } + if in.AdditionalRoutingCapabilities != nil { + in, out := &in.AdditionalRoutingCapabilities, &out.AdditionalRoutingCapabilities + *out = new(AdditionalRoutingCapabilities) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. 
+func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyClusterStatus) DeepCopyInto(out *NodeDisruptionPolicyClusterStatus) { + *out = *in + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]NodeDisruptionPolicyStatusFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Units != nil { + in, out := &in.Units, &out.Units + *out = make([]NodeDisruptionPolicyStatusUnit, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.SSHKey.DeepCopyInto(&out.SSHKey) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyClusterStatus. +func (in *NodeDisruptionPolicyClusterStatus) DeepCopy() *NodeDisruptionPolicyClusterStatus { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyConfig) DeepCopyInto(out *NodeDisruptionPolicyConfig) { + *out = *in + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]NodeDisruptionPolicySpecFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Units != nil { + in, out := &in.Units, &out.Units + *out = make([]NodeDisruptionPolicySpecUnit, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.SSHKey.DeepCopyInto(&out.SSHKey) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyConfig. +func (in *NodeDisruptionPolicyConfig) DeepCopy() *NodeDisruptionPolicyConfig { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicySpecAction) DeepCopyInto(out *NodeDisruptionPolicySpecAction) { + *out = *in + if in.Reload != nil { + in, out := &in.Reload, &out.Reload + *out = new(ReloadService) + **out = **in + } + if in.Restart != nil { + in, out := &in.Restart, &out.Restart + *out = new(RestartService) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecAction. +func (in *NodeDisruptionPolicySpecAction) DeepCopy() *NodeDisruptionPolicySpecAction { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicySpecAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeDisruptionPolicySpecFile) DeepCopyInto(out *NodeDisruptionPolicySpecFile) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicySpecAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecFile. +func (in *NodeDisruptionPolicySpecFile) DeepCopy() *NodeDisruptionPolicySpecFile { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicySpecFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicySpecSSHKey) DeepCopyInto(out *NodeDisruptionPolicySpecSSHKey) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicySpecAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecSSHKey. +func (in *NodeDisruptionPolicySpecSSHKey) DeepCopy() *NodeDisruptionPolicySpecSSHKey { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicySpecSSHKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicySpecUnit) DeepCopyInto(out *NodeDisruptionPolicySpecUnit) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicySpecAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecUnit. +func (in *NodeDisruptionPolicySpecUnit) DeepCopy() *NodeDisruptionPolicySpecUnit { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicySpecUnit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyStatus) DeepCopyInto(out *NodeDisruptionPolicyStatus) { + *out = *in + in.ClusterPolicies.DeepCopyInto(&out.ClusterPolicies) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatus. +func (in *NodeDisruptionPolicyStatus) DeepCopy() *NodeDisruptionPolicyStatus { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyStatusAction) DeepCopyInto(out *NodeDisruptionPolicyStatusAction) { + *out = *in + if in.Reload != nil { + in, out := &in.Reload, &out.Reload + *out = new(ReloadService) + **out = **in + } + if in.Restart != nil { + in, out := &in.Restart, &out.Restart + *out = new(RestartService) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusAction. 
+func (in *NodeDisruptionPolicyStatusAction) DeepCopy() *NodeDisruptionPolicyStatusAction { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyStatusAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyStatusFile) DeepCopyInto(out *NodeDisruptionPolicyStatusFile) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicyStatusAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusFile. +func (in *NodeDisruptionPolicyStatusFile) DeepCopy() *NodeDisruptionPolicyStatusFile { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyStatusFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyStatusSSHKey) DeepCopyInto(out *NodeDisruptionPolicyStatusSSHKey) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicyStatusAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusSSHKey. +func (in *NodeDisruptionPolicyStatusSSHKey) DeepCopy() *NodeDisruptionPolicyStatusSSHKey { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyStatusSSHKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyStatusUnit) DeepCopyInto(out *NodeDisruptionPolicyStatusUnit) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicyStatusAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusUnit. +func (in *NodeDisruptionPolicyStatusUnit) DeepCopy() *NodeDisruptionPolicyStatusUnit { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyStatusUnit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePlacement) DeepCopyInto(out *NodePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePlacement. +func (in *NodePlacement) DeepCopy() *NodePlacement { + if in == nil { + return nil + } + out := new(NodePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodePortStrategy) DeepCopyInto(out *NodePortStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortStrategy. +func (in *NodePortStrategy) DeepCopy() *NodePortStrategy { + if in == nil { + return nil + } + out := new(NodePortStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.LastFailedTime != nil { + in, out := &in.LastFailedTime, &out.LastFailedTime + *out = (*in).DeepCopy() + } + if in.LastFailedRevisionErrors != nil { + in, out := &in.LastFailedRevisionErrors, &out.LastFailedRevisionErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAPIServerStatus) DeepCopyInto(out *OAuthAPIServerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAPIServerStatus. +func (in *OAuthAPIServerStatus) DeepCopy() *OAuthAPIServerStatus { + if in == nil { + return nil + } + out := new(OAuthAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLM) DeepCopyInto(out *OLM) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLM. +func (in *OLM) DeepCopy() *OLM { + if in == nil { + return nil + } + out := new(OLM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLM) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMList) DeepCopyInto(out *OLMList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OLM, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMList. +func (in *OLMList) DeepCopy() *OLMList { + if in == nil { + return nil + } + out := new(OLMList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLMList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OLMSpec) DeepCopyInto(out *OLMSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMSpec. +func (in *OLMSpec) DeepCopy() *OLMSpec { + if in == nil { + return nil + } + out := new(OLMSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMStatus) DeepCopyInto(out *OLMStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMStatus. +func (in *OLMStatus) DeepCopy() *OLMStatus { + if in == nil { + return nil + } + out := new(OLMStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { + *out = *in + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + if in.GenevePort != nil { + in, out := &in.GenevePort, &out.GenevePort + *out = new(uint32) + **out = **in + } + if in.HybridOverlayConfig != nil { + in, out := &in.HybridOverlayConfig, &out.HybridOverlayConfig + *out = new(HybridOverlayConfig) + (*in).DeepCopyInto(*out) + } + if in.IPsecConfig != nil { + in, out := &in.IPsecConfig, &out.IPsecConfig + *out = new(IPsecConfig) + (*in).DeepCopyInto(*out) + } + if in.PolicyAuditConfig != nil { + in, out := &in.PolicyAuditConfig, &out.PolicyAuditConfig + *out = new(PolicyAuditConfig) + (*in).DeepCopyInto(*out) + } + if in.GatewayConfig != nil { + in, out := &in.GatewayConfig, &out.GatewayConfig + *out = new(GatewayConfig) + **out = **in + } + in.EgressIPConfig.DeepCopyInto(&out.EgressIPConfig) + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(IPv4OVNKubernetesConfig) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(IPv6OVNKubernetesConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig. +func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServer) DeepCopyInto(out *OpenShiftAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServer. +func (in *OpenShiftAPIServer) DeepCopy() *OpenShiftAPIServer { + if in == nil { + return nil + } + out := new(OpenShiftAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenShiftAPIServerList) DeepCopyInto(out *OpenShiftAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenShiftAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerList. +func (in *OpenShiftAPIServerList) DeepCopy() *OpenShiftAPIServerList { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServerSpec) DeepCopyInto(out *OpenShiftAPIServerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerSpec. +func (in *OpenShiftAPIServerSpec) DeepCopy() *OpenShiftAPIServerSpec { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServerStatus) DeepCopyInto(out *OpenShiftAPIServerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerStatus. +func (in *OpenShiftAPIServerStatus) DeepCopy() *OpenShiftAPIServerStatus { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManager) DeepCopyInto(out *OpenShiftControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManager. +func (in *OpenShiftControllerManager) DeepCopy() *OpenShiftControllerManager { + if in == nil { + return nil + } + out := new(OpenShiftControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenShiftControllerManagerList) DeepCopyInto(out *OpenShiftControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenShiftControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerList. +func (in *OpenShiftControllerManagerList) DeepCopy() *OpenShiftControllerManagerList { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManagerSpec) DeepCopyInto(out *OpenShiftControllerManagerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerSpec. +func (in *OpenShiftControllerManagerSpec) DeepCopy() *OpenShiftControllerManagerSpec { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManagerStatus) DeepCopyInto(out *OpenShiftControllerManagerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerStatus. +func (in *OpenShiftControllerManagerStatus) DeepCopy() *OpenShiftControllerManagerStatus { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftSDNConfig) DeepCopyInto(out *OpenShiftSDNConfig) { + *out = *in + if in.VXLANPort != nil { + in, out := &in.VXLANPort, &out.VXLANPort + *out = new(uint32) + **out = **in + } + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + if in.UseExternalOpenvswitch != nil { + in, out := &in.UseExternalOpenvswitch, &out.UseExternalOpenvswitch + *out = new(bool) + **out = **in + } + if in.EnableUnidling != nil { + in, out := &in.EnableUnidling, &out.EnableUnidling + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftSDNConfig. +func (in *OpenShiftSDNConfig) DeepCopy() *OpenShiftSDNConfig { + if in == nil { + return nil + } + out := new(OpenShiftSDNConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackLoadBalancerParameters) DeepCopyInto(out *OpenStackLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackLoadBalancerParameters. 
+func (in *OpenStackLoadBalancerParameters) DeepCopy() *OpenStackLoadBalancerParameters { + if in == nil { + return nil + } + out := new(OpenStackLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition. +func (in *OperatorCondition) DeepCopy() *OperatorCondition { + if in == nil { + return nil + } + out := new(OperatorCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { + *out = *in + in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) + in.ObservedConfig.DeepCopyInto(&out.ObservedConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec. +func (in *OperatorSpec) DeepCopy() *OperatorSpec { + if in == nil { + return nil + } + out := new(OperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Generations != nil { + in, out := &in.Generations, &out.Generations + *out = make([]GenerationStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus. +func (in *OperatorStatus) DeepCopy() *OperatorStatus { + if in == nil { + return nil + } + out := new(OperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialSelector) DeepCopyInto(out *PartialSelector) { + *out = *in + if in.MachineResourceSelector != nil { + in, out := &in.MachineResourceSelector, &out.MachineResourceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialSelector. +func (in *PartialSelector) DeepCopy() *PartialSelector { + if in == nil { + return nil + } + out := new(PartialSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Perspective) DeepCopyInto(out *Perspective) { + *out = *in + in.Visibility.DeepCopyInto(&out.Visibility) + if in.PinnedResources != nil { + in, out := &in.PinnedResources, &out.PinnedResources + *out = new([]PinnedResourceReference) + if **in != nil { + in, out := *in, *out + *out = make([]PinnedResourceReference, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Perspective. 
+func (in *Perspective) DeepCopy() *Perspective { + if in == nil { + return nil + } + out := new(Perspective) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerspectiveVisibility) DeepCopyInto(out *PerspectiveVisibility) { + *out = *in + if in.AccessReview != nil { + in, out := &in.AccessReview, &out.AccessReview + *out = new(ResourceAttributesAccessReview) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerspectiveVisibility. +func (in *PerspectiveVisibility) DeepCopy() *PerspectiveVisibility { + if in == nil { + return nil + } + out := new(PerspectiveVisibility) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PinnedResourceReference) DeepCopyInto(out *PinnedResourceReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PinnedResourceReference. +func (in *PinnedResourceReference) DeepCopy() *PinnedResourceReference { + if in == nil { + return nil + } + out := new(PinnedResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyAuditConfig) DeepCopyInto(out *PolicyAuditConfig) { + *out = *in + if in.RateLimit != nil { + in, out := &in.RateLimit, &out.RateLimit + *out = new(uint32) + **out = **in + } + if in.MaxFileSize != nil { + in, out := &in.MaxFileSize, &out.MaxFileSize + *out = new(uint32) + **out = **in + } + if in.MaxLogFiles != nil { + in, out := &in.MaxLogFiles, &out.MaxLogFiles + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyAuditConfig. +func (in *PolicyAuditConfig) DeepCopy() *PolicyAuditConfig { + if in == nil { + return nil + } + out := new(PolicyAuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateStrategy) DeepCopyInto(out *PrivateStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateStrategy. +func (in *PrivateStrategy) DeepCopy() *PrivateStrategy { + if in == nil { + return nil + } + out := new(PrivateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectAccess) DeepCopyInto(out *ProjectAccess) { + *out = *in + if in.AvailableClusterRoles != nil { + in, out := &in.AvailableClusterRoles, &out.AvailableClusterRoles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectAccess. +func (in *ProjectAccess) DeepCopy() *ProjectAccess { + if in == nil { + return nil + } + out := new(ProjectAccess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProviderLoadBalancerParameters) DeepCopyInto(out *ProviderLoadBalancerParameters) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSLoadBalancerParameters) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPLoadBalancerParameters) + **out = **in + } + if in.IBM != nil { + in, out := &in.IBM, &out.IBM + *out = new(IBMLoadBalancerParameters) + **out = **in + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackLoadBalancerParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderLoadBalancerParameters. +func (in *ProviderLoadBalancerParameters) DeepCopy() *ProviderLoadBalancerParameters { + if in == nil { + return nil + } + out := new(ProviderLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ProxyArgumentList) DeepCopyInto(out *ProxyArgumentList) { + { + in := &in + *out = make(ProxyArgumentList, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyArgumentList. +func (in ProxyArgumentList) DeepCopy() ProxyArgumentList { + if in == nil { + return nil + } + out := new(ProxyArgumentList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.ProxyArguments != nil { + in, out := &in.ProxyArguments, &out.ProxyArguments + *out = make(map[string]ProxyArgumentList, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ProxyArgumentList, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuickStarts) DeepCopyInto(out *QuickStarts) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickStarts. +func (in *QuickStarts) DeepCopy() *QuickStarts { + if in == nil { + return nil + } + out := new(QuickStarts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReloadService) DeepCopyInto(out *ReloadService) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReloadService. +func (in *ReloadService) DeepCopy() *ReloadService { + if in == nil { + return nil + } + out := new(ReloadService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAttributesAccessReview) DeepCopyInto(out *ResourceAttributesAccessReview) { + *out = *in + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]authorizationv1.ResourceAttributes, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Missing != nil { + in, out := &in.Missing, &out.Missing + *out = make([]authorizationv1.ResourceAttributes, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributesAccessReview. +func (in *ResourceAttributesAccessReview) DeepCopy() *ResourceAttributesAccessReview { + if in == nil { + return nil + } + out := new(ResourceAttributesAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestartService) DeepCopyInto(out *RestartService) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartService. +func (in *RestartService) DeepCopy() *RestartService { + if in == nil { + return nil + } + out := new(RestartService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteAdmissionPolicy) DeepCopyInto(out *RouteAdmissionPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteAdmissionPolicy. +func (in *RouteAdmissionPolicy) DeepCopy() *RouteAdmissionPolicy { + if in == nil { + return nil + } + out := new(RouteAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SFlowConfig) DeepCopyInto(out *SFlowConfig) { + *out = *in + if in.Collectors != nil { + in, out := &in.Collectors, &out.Collectors + *out = make([]IPPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SFlowConfig. +func (in *SFlowConfig) DeepCopy() *SFlowConfig { + if in == nil { + return nil + } + out := new(SFlowConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Server) DeepCopyInto(out *Server) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.ForwardPlugin.DeepCopyInto(&out.ForwardPlugin) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. +func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIssuerStatus) DeepCopyInto(out *ServiceAccountIssuerStatus) { + *out = *in + if in.ExpirationTime != nil { + in, out := &in.ExpirationTime, &out.ExpirationTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIssuerStatus. 
+func (in *ServiceAccountIssuerStatus) DeepCopy() *ServiceAccountIssuerStatus { + if in == nil { + return nil + } + out := new(ServiceAccountIssuerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCA) DeepCopyInto(out *ServiceCA) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCA. +func (in *ServiceCA) DeepCopy() *ServiceCA { + if in == nil { + return nil + } + out := new(ServiceCA) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCA) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCAList) DeepCopyInto(out *ServiceCAList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCA, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAList. +func (in *ServiceCAList) DeepCopy() *ServiceCAList { + if in == nil { + return nil + } + out := new(ServiceCAList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCAList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCASpec) DeepCopyInto(out *ServiceCASpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCASpec. +func (in *ServiceCASpec) DeepCopy() *ServiceCASpec { + if in == nil { + return nil + } + out := new(ServiceCASpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCAStatus) DeepCopyInto(out *ServiceCAStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAStatus. +func (in *ServiceCAStatus) DeepCopy() *ServiceCAStatus { + if in == nil { + return nil + } + out := new(ServiceCAStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServer) DeepCopyInto(out *ServiceCatalogAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServer. 
+func (in *ServiceCatalogAPIServer) DeepCopy() *ServiceCatalogAPIServer { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerList) DeepCopyInto(out *ServiceCatalogAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCatalogAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerList. +func (in *ServiceCatalogAPIServerList) DeepCopy() *ServiceCatalogAPIServerList { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerSpec) DeepCopyInto(out *ServiceCatalogAPIServerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerSpec. +func (in *ServiceCatalogAPIServerSpec) DeepCopy() *ServiceCatalogAPIServerSpec { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerStatus) DeepCopyInto(out *ServiceCatalogAPIServerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerStatus. +func (in *ServiceCatalogAPIServerStatus) DeepCopy() *ServiceCatalogAPIServerStatus { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManager) DeepCopyInto(out *ServiceCatalogControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManager. +func (in *ServiceCatalogControllerManager) DeepCopy() *ServiceCatalogControllerManager { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ServiceCatalogControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerList) DeepCopyInto(out *ServiceCatalogControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCatalogControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerList. +func (in *ServiceCatalogControllerManagerList) DeepCopy() *ServiceCatalogControllerManagerList { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerSpec) DeepCopyInto(out *ServiceCatalogControllerManagerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerSpec. +func (in *ServiceCatalogControllerManagerSpec) DeepCopy() *ServiceCatalogControllerManagerSpec { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerStatus) DeepCopyInto(out *ServiceCatalogControllerManagerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerStatus. +func (in *ServiceCatalogControllerManagerStatus) DeepCopy() *ServiceCatalogControllerManagerStatus { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleMacvlanConfig) DeepCopyInto(out *SimpleMacvlanConfig) { + *out = *in + if in.IPAMConfig != nil { + in, out := &in.IPAMConfig, &out.IPAMConfig + *out = new(IPAMConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleMacvlanConfig. +func (in *SimpleMacvlanConfig) DeepCopy() *SimpleMacvlanConfig { + if in == nil { + return nil + } + out := new(SimpleMacvlanConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMAddresses) DeepCopyInto(out *StaticIPAMAddresses) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMAddresses. 
+func (in *StaticIPAMAddresses) DeepCopy() *StaticIPAMAddresses { + if in == nil { + return nil + } + out := new(StaticIPAMAddresses) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMConfig) DeepCopyInto(out *StaticIPAMConfig) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]StaticIPAMAddresses, len(*in)) + copy(*out, *in) + } + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]StaticIPAMRoutes, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(StaticIPAMDNS) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMConfig. +func (in *StaticIPAMConfig) DeepCopy() *StaticIPAMConfig { + if in == nil { + return nil + } + out := new(StaticIPAMConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMDNS) DeepCopyInto(out *StaticIPAMDNS) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Search != nil { + in, out := &in.Search, &out.Search + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMDNS. +func (in *StaticIPAMDNS) DeepCopy() *StaticIPAMDNS { + if in == nil { + return nil + } + out := new(StaticIPAMDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMRoutes) DeepCopyInto(out *StaticIPAMRoutes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMRoutes. +func (in *StaticIPAMRoutes) DeepCopy() *StaticIPAMRoutes { + if in == nil { + return nil + } + out := new(StaticIPAMRoutes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticPodOperatorSpec) DeepCopyInto(out *StaticPodOperatorSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorSpec. +func (in *StaticPodOperatorSpec) DeepCopy() *StaticPodOperatorSpec { + if in == nil { + return nil + } + out := new(StaticPodOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticPodOperatorStatus) DeepCopyInto(out *StaticPodOperatorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + if in.NodeStatuses != nil { + in, out := &in.NodeStatuses, &out.NodeStatuses + *out = make([]NodeStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorStatus. 
+func (in *StaticPodOperatorStatus) DeepCopy() *StaticPodOperatorStatus { + if in == nil { + return nil + } + out := new(StaticPodOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatuspageProvider) DeepCopyInto(out *StatuspageProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatuspageProvider. +func (in *StatuspageProvider) DeepCopy() *StatuspageProvider { + if in == nil { + return nil + } + out := new(StatuspageProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Storage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageList) DeepCopyInto(out *StorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Storage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList. +func (in *StorageList) DeepCopy() *StorageList { + if in == nil { + return nil + } + out := new(StorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. +func (in *StorageStatus) DeepCopy() *StorageStatus { + if in == nil { + return nil + } + out := new(StorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SyslogLoggingDestinationParameters) DeepCopyInto(out *SyslogLoggingDestinationParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogLoggingDestinationParameters. +func (in *SyslogLoggingDestinationParameters) DeepCopy() *SyslogLoggingDestinationParameters { + if in == nil { + return nil + } + out := new(SyslogLoggingDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Theme) DeepCopyInto(out *Theme) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Theme. +func (in *Theme) DeepCopy() *Theme { + if in == nil { + return nil + } + out := new(Theme) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Upstream) DeepCopyInto(out *Upstream) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Upstream. +func (in *Upstream) DeepCopy() *Upstream { + if in == nil { + return nil + } + out := new(Upstream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpstreamResolvers) DeepCopyInto(out *UpstreamResolvers) { + *out = *in + if in.Upstreams != nil { + in, out := &in.Upstreams, &out.Upstreams + *out = make([]Upstream, len(*in)) + copy(*out, *in) + } + in.TransportConfig.DeepCopyInto(&out.TransportConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamResolvers. +func (in *UpstreamResolvers) DeepCopy() *UpstreamResolvers { + if in == nil { + return nil + } + out := new(UpstreamResolvers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereCSIDriverConfigSpec) DeepCopyInto(out *VSphereCSIDriverConfigSpec) { + *out = *in + if in.TopologyCategories != nil { + in, out := &in.TopologyCategories, &out.TopologyCategories + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GlobalMaxSnapshotsPerBlockVolume != nil { + in, out := &in.GlobalMaxSnapshotsPerBlockVolume, &out.GlobalMaxSnapshotsPerBlockVolume + *out = new(uint32) + **out = **in + } + if in.GranularMaxSnapshotsPerBlockVolumeInVSAN != nil { + in, out := &in.GranularMaxSnapshotsPerBlockVolumeInVSAN, &out.GranularMaxSnapshotsPerBlockVolumeInVSAN + *out = new(uint32) + **out = **in + } + if in.GranularMaxSnapshotsPerBlockVolumeInVVOL != nil { + in, out := &in.GranularMaxSnapshotsPerBlockVolumeInVVOL, &out.GranularMaxSnapshotsPerBlockVolumeInVVOL + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereCSIDriverConfigSpec. 
+func (in *VSphereCSIDriverConfigSpec) DeepCopy() *VSphereCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(VSphereCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 000000000..5b2ca202f --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,455 @@ +authentications.operator.openshift.io: + Annotations: + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: authentications.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: authentication + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: Authentication + Labels: {} + PluralName: authentications + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +csisnapshotcontrollers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/562 + CRDName: csisnapshotcontrollers.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: csi-snapshot-controller + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_80" + GroupName: operator.openshift.io + HasStatus: true + KindName: CSISnapshotController + Labels: {} + PluralName: csisnapshotcontrollers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +cloudcredentials.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/692 + CRDName: cloudcredentials.operator.openshift.io + Capability: CloudCredential + Category: "" + FeatureGates: [] + FilenameOperatorName: cloud-credential + FilenameOperatorOrdering: "00" + FilenameRunLevel: "0000_40" + GroupName: operator.openshift.io + HasStatus: true + KindName: CloudCredential + Labels: {} + PluralName: cloudcredentials + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +clustercsidrivers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/701 + CRDName: clustercsidrivers.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: + - VSphereConfigurableMaxAllowedBlockVolumesPerNode + FilenameOperatorName: csi-driver + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: ClusterCSIDriver + Labels: {} + PluralName: clustercsidrivers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +configs.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/612 + CRDName: configs.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: operator.openshift.io + HasStatus: true + KindName: Config + Labels: {} + PluralName: configs + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + 
Version: v1 + +consoles.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/486 + CRDName: consoles.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: console + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: Console + Labels: {} + PluralName: consoles + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +dnses.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: dnses.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: dns + FilenameOperatorOrdering: "00" + FilenameRunLevel: "0000_70" + GroupName: operator.openshift.io + HasStatus: true + KindName: DNS + Labels: {} + PluralName: dnses + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +etcds.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/752 + CRDName: etcds.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: + - EtcdBackendQuota + FilenameOperatorName: etcd + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_12" + GroupName: operator.openshift.io + HasStatus: true + KindName: Etcd + Labels: {} + PluralName: etcds + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +ingresscontrollers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/616 + CRDName: ingresscontrollers.operator.openshift.io + Capability: Ingress + Category: "" + FeatureGates: + - IngressControllerLBSubnetsAWS + - SetEIPForNLBIngressController + - SetEIPForNLBIngressController+IngressControllerLBSubnetsAWS + FilenameOperatorName: ingress + FilenameOperatorOrdering: "00" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: IngressController + Labels: {} + PluralName: ingresscontrollers + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +insightsoperators.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1237 + CRDName: insightsoperators.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: insights + FilenameOperatorOrdering: "00" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: InsightsOperator + Labels: {} + PluralName: insightsoperators + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +kubeapiservers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: kubeapiservers.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: kube-apiserver + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_20" + GroupName: operator.openshift.io + HasStatus: true + KindName: KubeAPIServer + Labels: {} + PluralName: kubeapiservers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +kubecontrollermanagers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: kubecontrollermanagers.operator.openshift.io + 
Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: kube-controller-manager + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_25" + GroupName: operator.openshift.io + HasStatus: true + KindName: KubeControllerManager + Labels: {} + PluralName: kubecontrollermanagers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +kubeschedulers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: kubeschedulers.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: kube-scheduler + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_25" + GroupName: operator.openshift.io + HasStatus: true + KindName: KubeScheduler + Labels: {} + PluralName: kubeschedulers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +kubestorageversionmigrators.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/503 + CRDName: kubestorageversionmigrators.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: kube-storage-version-migrator + FilenameOperatorOrdering: "00" + FilenameRunLevel: "0000_40" + GroupName: operator.openshift.io + HasStatus: true + KindName: KubeStorageVersionMigrator + Labels: {} + PluralName: kubestorageversionmigrators + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +machineconfigurations.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1453 + CRDName: machineconfigurations.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ManagedBootImages + FilenameOperatorName: machine-config + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_80" + GroupName: operator.openshift.io + HasStatus: true + KindName: MachineConfiguration + Labels: {} + PluralName: machineconfigurations + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +networks.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: networks.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: + - AdditionalRoutingCapabilities + - NetworkLiveMigration + - RouteAdvertisements + FilenameOperatorName: network + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_70" + GroupName: operator.openshift.io + HasStatus: true + KindName: Network + Labels: {} + PluralName: networks + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +olms.operator.openshift.io: + Annotations: + include.release.openshift.io/ibm-cloud-managed: "false" + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1504 + CRDName: olms.operator.openshift.io + Capability: OperatorLifecycleManagerV1 + Category: "" + FeatureGates: + - NewOLM + FilenameOperatorName: operator-lifecycle-manager + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: operator.openshift.io + HasStatus: true + KindName: OLM + Labels: {} + PluralName: olms + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - NewOLM + Version: v1 + +openshiftapiservers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: 
https://github.com/openshift/api/pull/475 + CRDName: openshiftapiservers.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: openshift-apiserver + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_30" + GroupName: operator.openshift.io + HasStatus: true + KindName: OpenShiftAPIServer + Labels: {} + PluralName: openshiftapiservers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +openshiftcontrollermanagers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: openshiftcontrollermanagers.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: openshift-controller-manager + FilenameOperatorOrdering: "02" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: OpenShiftControllerManager + Labels: {} + PluralName: openshiftcontrollermanagers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +servicecas.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: servicecas.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: service-ca + FilenameOperatorOrdering: "02" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: ServiceCA + Labels: {} + PluralName: servicecas + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +storages.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/670 + CRDName: storages.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: storage + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: Storage + Labels: {} + PluralName: storages + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..582f9686f --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,2131 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_GenerationStatus = map[string]string{ + "": "GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.", + "group": "group is the group of the thing you're tracking", + "resource": "resource is the resource type of the thing you're tracking", + "namespace": "namespace is where the thing you're tracking is", + "name": "name is the name of the thing you're tracking", + "lastGeneration": "lastGeneration is the last generation of the workload controller involved", + "hash": "hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps", +} + +func (GenerationStatus) SwaggerDoc() map[string]string { + return map_GenerationStatus +} + +var map_MyOperatorResource = map[string]string{ + "": "MyOperatorResource is an example operator configuration type\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (MyOperatorResource) SwaggerDoc() map[string]string { + return map_MyOperatorResource +} + +var map_NodeStatus = map[string]string{ + "": "NodeStatus provides information about the current state of a particular node managed by this operator.", + "nodeName": "nodeName is the name of the node", + "currentRevision": "currentRevision is the generation of the most recently successful deployment. Can not be set on creation of a nodeStatus. Updates must only increase the value.", + "targetRevision": "targetRevision is the generation of the deployment we're trying to apply. Can not be set on creation of a nodeStatus.", + "lastFailedRevision": "lastFailedRevision is the generation of the deployment we tried and failed to deploy.", + "lastFailedTime": "lastFailedTime is the time the last failed revision failed the last time.", + "lastFailedReason": "lastFailedReason is a machine readable failure reason string.", + "lastFailedCount": "lastFailedCount is how often the installer pod of the last failed revision failed.", + "lastFallbackCount": "lastFallbackCount is how often a fallback to a previous revision happened.", + "lastFailedRevisionErrors": "lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision.", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_OperatorCondition = map[string]string{ + "": "OperatorCondition is just the standard condition fields.", + "type": "type of condition in CamelCase or in foo.example.com/CamelCase.", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", +} + +func (OperatorCondition) SwaggerDoc() map[string]string { + return map_OperatorCondition +} + +var map_OperatorSpec = map[string]string{ + "": "OperatorSpec contains common fields operators need. 
It is intended to be anonymously included inside the Spec struct for your particular operator.",
+	"managementState": "managementState indicates whether and how the operator should manage the component",
+	"logLevel": "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".",
+	"operatorLogLevel": "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".",
+	"unsupportedConfigOverrides": "unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster.",
+	"observedConfig": "observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator",
+}
+
+func (OperatorSpec) SwaggerDoc() map[string]string {
+	return map_OperatorSpec
+}
+
+var map_OperatorStatus = map[string]string{
+	"observedGeneration": "observedGeneration is the last generation change you've dealt with",
+	"conditions": "conditions is a list of conditions and their status",
+	"version": "version is the level this availability applies to",
+	"readyReplicas": "readyReplicas indicates how many replicas are ready and at the desired state",
+	"latestAvailableRevision": "latestAvailableRevision is the deploymentID of the most recent deployment",
+	"generations": "generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction.",
+}
+
+func (OperatorStatus) SwaggerDoc() map[string]string {
+	return map_OperatorStatus
+}
+
+var map_StaticPodOperatorSpec = map[string]string{
+	"": "StaticPodOperatorSpec is spec for controllers that manage static pods.",
+	"forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.",
+	"failedRevisionLimit": "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api; -1 = unlimited, 0 or unset = 5 (default)",
+	"succeededRevisionLimit": "succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api; -1 = unlimited, 0 or unset = 5 (default)",
+}
+
+func (StaticPodOperatorSpec) SwaggerDoc() map[string]string {
+	return map_StaticPodOperatorSpec
+}
+
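These SwaggerDoc() maps are plain field-name-to-description lookups, so documentation tooling can consume them directly. A minimal sketch of one possible consumer, assuming only the vendored operator/v1 package (the printFieldDocs helper is illustrative, not part of the generated API):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

// printFieldDocs dumps the generated descriptions for any type that follows
// the SwaggerDoc() convention used throughout this file. The empty key holds
// the doc string for the type itself.
func printFieldDocs(docs map[string]string) {
	if typeDoc, ok := docs[""]; ok {
		fmt.Println(typeDoc)
	}
	for field, doc := range docs {
		if field != "" {
			fmt.Printf("  %s: %s\n", field, doc)
		}
	}
}

func main() {
	printFieldDocs(operatorv1.OperatorSpec{}.SwaggerDoc())
}

+var map_StaticPodOperatorStatus = map[string]string{
+	"": "StaticPodOperatorStatus is status for controllers that manage static pods. 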
There are different needs because individual node status must be tracked.",
+	"latestAvailableRevisionReason": "latestAvailableRevisionReason describes the detailed reason for the most recent deployment",
+	"nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes",
+}
+
+func (StaticPodOperatorStatus) SwaggerDoc() map[string]string {
+	return map_StaticPodOperatorStatus
+}
+
+var map_Authentication = map[string]string{
+	"": "Authentication provides information to configure an operator to manage authentication.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (Authentication) SwaggerDoc() map[string]string {
+	return map_Authentication
+}
+
+var map_AuthenticationList = map[string]string{
+	"": "AuthenticationList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (AuthenticationList) SwaggerDoc() map[string]string {
+	return map_AuthenticationList
+}
+
+var map_AuthenticationStatus = map[string]string{
+	"oauthAPIServer": "oauthAPIServer holds status specific only to oauth-apiserver",
+}
+
+func (AuthenticationStatus) SwaggerDoc() map[string]string {
+	return map_AuthenticationStatus
+}
+
+var map_OAuthAPIServerStatus = map[string]string{
+	"latestAvailableRevision": "latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods.",
+}
+
+func (OAuthAPIServerStatus) SwaggerDoc() map[string]string {
+	return map_OAuthAPIServerStatus
+}
+
+var map_CloudCredential = map[string]string{
+	"": "CloudCredential provides a means to configure an operator to manage CredentialsRequests.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (CloudCredential) SwaggerDoc() map[string]string {
+	return map_CloudCredential
+}
+
+var map_CloudCredentialList = map[string]string{
+	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (CloudCredentialList) SwaggerDoc() map[string]string {
+	return map_CloudCredentialList
+}
+
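The credentialsMode documentation below enumerates the supported modes; a minimal sketch of pinning the operator to Manual mode follows, assuming the vendored operator/v1 types and the CloudCredentialsModeManual constant match upstream openshift/api:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Force the cloud-credential-operator into "manual" mode instead of
	// letting it probe the root credential's capabilities.
	cc := operatorv1.CloudCredential{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: operatorv1.CloudCredentialSpec{
			CredentialsMode: operatorv1.CloudCredentialsModeManual,
		},
	}
	fmt.Println(cc.Spec.CredentialsMode) // Manual
}

+var map_CloudCredentialSpec = map[string]string{
+	"": "CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator.",
+	"credentialsMode": "credentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. 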
Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"",
+}
+
+func (CloudCredentialSpec) SwaggerDoc() map[string]string {
+	return map_CloudCredentialSpec
+}
+
+var map_CloudCredentialStatus = map[string]string{
+	"": "CloudCredentialStatus defines the observed status of the cloud-credential-operator.",
+}
+
+func (CloudCredentialStatus) SwaggerDoc() map[string]string {
+	return map_CloudCredentialStatus
+}
+
+var map_Config = map[string]string{
+	"": "Config specifies the behavior of the config operator which is responsible for creating the initial configuration of other components on the cluster. The operator also handles installation, migration or synchronization of cloud configurations for AWS and Azure cloud-based clusters\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec is the specification of the desired behavior of the Config Operator.",
+	"status": "status defines the observed status of the Config Operator.",
+}
+
+func (Config) SwaggerDoc() map[string]string {
+	return map_Config
+}
+
+var map_ConfigList = map[string]string{
+	"": "ConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items": "items contains the items",
+}
+
+func (ConfigList) SwaggerDoc() map[string]string {
+	return map_ConfigList
+}
+
+var map_AddPage = map[string]string{
+	"": "AddPage allows customizing actions on the Add page in developer perspective.",
+	"disabledActions": "disabledActions is a list of actions that are not shown to users. Each action in the list is represented by its ID.",
+}
+
+func (AddPage) SwaggerDoc() map[string]string {
+	return map_AddPage
+}
+
+var map_Capability = map[string]string{
+	"": "Capabilities contains a set of UI capabilities and their state in the console UI.",
+	"name": "name is the unique name of a capability. Available capabilities are LightspeedButton and GettingStartedBanner.",
+	"visibility": "visibility defines the visibility state of the capability.",
+}
+
+func (Capability) SwaggerDoc() map[string]string {
+	return map_Capability
+}
+
+var map_CapabilityVisibility = map[string]string{
+	"": "CapabilityVisibility defines the criteria to enable/disable a capability.",
+	"state": "state defines if the capability is enabled or disabled in the console UI. Enabling the capability in the console UI is represented by the \"Enabled\" value. Disabling the capability in the console UI is represented by the \"Disabled\" value.",
+}
+
+func (CapabilityVisibility) SwaggerDoc() map[string]string {
+	return map_CapabilityVisibility
+}
+
+var map_ConfigMapFileReference = map[string]string{
+	"": "ConfigMapFileReference references a specific file within a ConfigMap.",
+	"name": "name is the name of the ConfigMap. name is a required field. 
Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. Must be at most 253 characters in length.",
+	"key": "key is the logo key inside the referenced ConfigMap. Must consist only of alphanumeric characters, dashes (-), underscores (_), and periods (.). Must be at most 253 characters in length. Must end in a valid file extension. A valid file extension must consist of a period followed by 2 to 5 alpha characters.",
+}
+
+func (ConfigMapFileReference) SwaggerDoc() map[string]string {
+	return map_ConfigMapFileReference
+}
+
+var map_Console = map[string]string{
+	"": "Console provides a means to configure an operator to manage the console.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (Console) SwaggerDoc() map[string]string {
+	return map_Console
+}
+
+var map_ConsoleConfigRoute = map[string]string{
+	"": "ConsoleConfigRoute holds information on external route access to console. DEPRECATED",
+	"hostname": "hostname is the desired custom domain under which console will be available.",
+	"secret": "secret points to a secret in the openshift-config namespace that contains the custom certificate and key and needs to be created manually by the cluster admin. The referenced Secret is required to contain the following key-value pairs: - \"tls.crt\" - specifies the custom certificate - \"tls.key\" - specifies the private key of the custom certificate. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.",
+}
+
+func (ConsoleConfigRoute) SwaggerDoc() map[string]string {
+	return map_ConsoleConfigRoute
+}
+
+var map_ConsoleCustomization = map[string]string{
+	"": "ConsoleCustomization defines optional configuration for the console UI. Ensure that Logos and CustomLogoFile cannot be set at the same time.",
+	"logos": "logos is used to replace the OpenShift Masthead and Favicon logos in the console UI with custom logos. logos is an optional field that allows a list of logos. Only one of logos or customLogoFile can be set at a time. If logos is set, customLogoFile must be unset. When specified, there must be at least one entry and no more than 2 entries. Each type must appear only once in the list.",
+	"capabilities": "capabilities defines an array of capabilities that can be interacted with in the console UI. Each capability defines a visual state that can be interacted with the console to render in the UI. Available capabilities are LightspeedButton and GettingStartedBanner. Each of the available capabilities may appear only once in the list.",
+	"brand": "brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. An invalid value will prevent a console rollout.",
+	"documentationBaseURL": "documentationBaseURL links to external documentation that is shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. 
An invalid value will prevent a console rollout.",
+	"customProductName": "customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name.",
+	"customLogoFile": "customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. Only one of customLogoFile or logos can be set at a time. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. The recommended file format for the logo is SVG, but other file formats are allowed if supported by the browser. Deprecated: Use logos instead.",
+	"developerCatalog": "developerCatalog allows configuring the shown developer catalog categories (filters) and types (sub-catalogs).",
+	"projectAccess": "projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options.",
+	"quickStarts": "quickStarts allows customization of available ConsoleQuickStart resources in console.",
+	"addPage": "addPage allows customizing actions on the Add page in developer perspective.",
+	"perspectives": "perspectives allows enabling/disabling of perspective(s) that a user can see in the Perspective switcher dropdown.",
+}
+
+func (ConsoleCustomization) SwaggerDoc() map[string]string {
+	return map_ConsoleCustomization
+}
+
+var map_ConsoleList = map[string]string{
+	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ConsoleList) SwaggerDoc() map[string]string {
+	return map_ConsoleList
+}
+
+var map_ConsoleProviders = map[string]string{
+	"": "ConsoleProviders defines a list of optional additional providers of functionality to the console.",
+	"statuspage": "statuspage contains the ID for the statuspage.io page that provides status info.",
+}
+
+func (ConsoleProviders) SwaggerDoc() map[string]string {
+	return map_ConsoleProviders
+}
+
+var map_ConsoleSpec = map[string]string{
+	"": "ConsoleSpec is the specification of the desired behavior of the Console.",
+	"customization": "customization is used to optionally provide a small set of customization options to the web console.",
+	"providers": "providers contains configuration for using specific service providers.",
+	"route": "route contains the hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which console will be available. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. If the custom hostname points to an arbitrary domain, manual DNS configuration steps are necessary. The default console route will be maintained to reserve the default hostname for console if the custom route is removed. If not specified, the default route will be used. 
DEPRECATED", + "plugins": "plugins defines a list of enabled console plugin names.", + "ingress": "ingress allows to configure the alternative ingress for the console. This field is intended for clusters without ingress capability, where access to routes is not possible.", +} + +func (ConsoleSpec) SwaggerDoc() map[string]string { + return map_ConsoleSpec +} + +var map_ConsoleStatus = map[string]string{ + "": "ConsoleStatus defines the observed status of the Console.", +} + +func (ConsoleStatus) SwaggerDoc() map[string]string { + return map_ConsoleStatus +} + +var map_DeveloperConsoleCatalogCategory = map[string]string{ + "": "DeveloperConsoleCatalogCategory for the developer console catalog.", + "subcategories": "subcategories defines a list of child categories.", +} + +func (DeveloperConsoleCatalogCategory) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCategory +} + +var map_DeveloperConsoleCatalogCategoryMeta = map[string]string{ + "": "DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category.", + "id": "id is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", + "label": "label defines a category display label. It is required and must have 1-64 characters.", + "tags": "tags is a list of strings that will match the category. A selected category show all items which has at least one overlapping tag between category and item.", +} + +func (DeveloperConsoleCatalogCategoryMeta) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCategoryMeta +} + +var map_DeveloperConsoleCatalogCustomization = map[string]string{ + "": "DeveloperConsoleCatalogCustomization allow cluster admin to configure developer catalog.", + "categories": "categories which are shown in the developer catalog.", + "types": "types allows enabling or disabling of sub-catalog types that user can see in the Developer catalog. When omitted, all the sub-catalog types will be shown.", +} + +func (DeveloperConsoleCatalogCustomization) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCustomization +} + +var map_DeveloperConsoleCatalogTypes = map[string]string{ + "": "DeveloperConsoleCatalogTypes defines the state of the sub-catalog types.", + "state": "state defines if a list of catalog types should be enabled or disabled.", + "enabled": "enabled is a list of developer catalog types (sub-catalogs IDs) that will be shown to users. Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available in the console on the cluster configuration page, or when editing the YAML in the console. Example: \"Devfile\", \"HelmChart\", \"BuilderImage\" If the list is non-empty, a new type will not be shown to the user until it is added to list. If the list is empty the complete developer catalog will be shown.", + "disabled": "disabled is a list of developer catalog types (sub-catalogs IDs) that are not shown to users. Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available in the console on the cluster configuration page, or when editing the YAML in the console. 
Example: \"Devfile\", \"HelmChart\", \"BuilderImage\" If the list is empty or all the available sub-catalog types are added, then the complete developer catalog should be hidden.", +} + +func (DeveloperConsoleCatalogTypes) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogTypes +} + +var map_FileReferenceSource = map[string]string{ + "": "FileReferenceSource is used by the console to locate the specified file containing a custom logo.", + "from": "from is a required field to specify the source type of the file reference. Allowed values are ConfigMap. When set to ConfigMap, the file will be sourced from a ConfigMap in the openshift-config namespace. The configMap field must be set when from is set to ConfigMap.", + "configMap": "configMap specifies the ConfigMap sourcing details such as the name of the ConfigMap and the key for the file. The ConfigMap must exist in the openshift-config namespace. Required when from is \"ConfigMap\", and forbidden otherwise.", +} + +func (FileReferenceSource) SwaggerDoc() map[string]string { + return map_FileReferenceSource +} + +var map_Ingress = map[string]string{ + "": "Ingress allows cluster admin to configure alternative ingress for the console.", + "consoleURL": "consoleURL is a URL to be used as the base console address. If not specified, the console route hostname will be used. This field is required for clusters without ingress capability, where access to routes is not possible. Make sure that appropriate ingress is set up at this URL. The console operator will monitor the URL and may go degraded if it's unreachable for an extended period. Must use the HTTPS scheme.", + "clientDownloadsURL": "clientDownloadsURL is a URL to be used as the address to download client binaries. If not specified, the downloads route hostname will be used. This field is required for clusters without ingress capability, where access to routes is not possible. The console operator will monitor the URL and may go degraded if it's unreachable for an extended period. Must use the HTTPS scheme.", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_Logo = map[string]string{ + "": "Logo defines a configuration based on theme modes for the console UI logo.", + "type": "type specifies the type of the logo for the console UI. It determines whether the logo is for the masthead or favicon. type is a required field that allows values of Masthead and Favicon. When set to \"Masthead\", the logo will be used in the masthead and about modal of the console UI. When set to \"Favicon\", the logo will be used as the favicon of the console UI.", + "themes": "themes specifies the themes for the console UI logo. themes is a required field that allows a list of themes. Each item in the themes list must have a unique mode and a source field. Each mode determines whether the logo is for the dark or light mode of the console UI. If a theme is not specified, the default OpenShift logo will be displayed for that theme. There must be at least one entry and no more than 2 entries.", +} + +func (Logo) SwaggerDoc() map[string]string { + return map_Logo +} + +var map_Perspective = map[string]string{ + "": "Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown", + "id": "id defines the id of the perspective. Example: \"dev\", \"admin\". The available perspective ids can be found in the code snippet section next to the yaml editor. 
Incorrect or unknown ids will be ignored.",
+	"visibility": "visibility defines the state of the perspective along with access review checks if needed for that perspective.",
+	"pinnedResources": "pinnedResources defines the list of default pinned resources that users will see on the perspective navigation if they have not customized these pinned resources themselves. The list of available Kubernetes resources could be read via `kubectl api-resources`. The console will also provide a configuration UI and a YAML snippet that will list the available resources that can be pinned to the navigation. Incorrect or unknown resources will be ignored.",
+}
+
+func (Perspective) SwaggerDoc() map[string]string {
+	return map_Perspective
+}
+
+var map_PerspectiveVisibility = map[string]string{
+	"": "PerspectiveVisibility defines the criteria to show/hide a perspective",
+	"state": "state defines whether the perspective is enabled or disabled, or whether an access review check is required.",
+	"accessReview": "accessReview defines required and missing access review checks.",
+}
+
+func (PerspectiveVisibility) SwaggerDoc() map[string]string {
+	return map_PerspectiveVisibility
+}
+
+var map_PinnedResourceReference = map[string]string{
+	"": "PinnedResourceReference includes the group, version and type of resource",
+	"group": "group is the API Group of the Resource. Enter empty string for the core group. This value should consist of only lowercase alphanumeric characters, hyphens and periods. Example: \"\", \"apps\", \"build.openshift.io\", etc.",
+	"version": "version is the API Version of the Resource. This value should consist of only lowercase alphanumeric characters. Example: \"v1\", \"v1beta1\", etc.",
+	"resource": "resource is the type that is being referenced. It is normally the plural form of the resource kind in lowercase. This value should consist of only lowercase alphanumeric characters and hyphens. Example: \"deployments\", \"deploymentconfigs\", \"pods\", etc.",
+}
+
+func (PinnedResourceReference) SwaggerDoc() map[string]string {
+	return map_PinnedResourceReference
+}
+
+var map_ProjectAccess = map[string]string{
+	"": "ProjectAccess contains options for project access roles",
+	"availableClusterRoles": "availableClusterRoles is the list of ClusterRole names that are assignable to users through the project access tab.",
+}
+
+func (ProjectAccess) SwaggerDoc() map[string]string {
+	return map_ProjectAccess
+}
+
+var map_QuickStarts = map[string]string{
+	"": "QuickStarts allow cluster admins to customize available ConsoleQuickStart resources.",
+	"disabled": "disabled is a list of ConsoleQuickStart resource names that are not shown to users.",
+}
+
+func (QuickStarts) SwaggerDoc() map[string]string {
+	return map_QuickStarts
+}
+
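The perspective visibility fields above combine with the access-review checks documented just below. A minimal sketch of a perspective gated on a permission check, assuming the vendored operator/v1 types (Perspective, PerspectiveAccessReview, ResourceAttributesAccessReview) and k8s.io/api/authorization/v1 match upstream:

package main

import (
	operatorv1 "github.com/openshift/api/operator/v1"
	authorizationv1 "k8s.io/api/authorization/v1"
)

// adminPerspective is shown only to users who can list namespaces: the check
// sits in Required, so every listed review must succeed before the
// perspective appears in the switcher dropdown.
var adminPerspective = operatorv1.Perspective{
	ID: "admin",
	Visibility: operatorv1.PerspectiveVisibility{
		State: operatorv1.PerspectiveAccessReview,
		AccessReview: &operatorv1.ResourceAttributesAccessReview{
			Required: []authorizationv1.ResourceAttributes{
				{Resource: "namespaces", Verb: "list"},
			},
		},
	},
}

func main() { _ = adminPerspective }

+var map_ResourceAttributesAccessReview = map[string]string{
+	"": "ResourceAttributesAccessReview defines the visibility of the perspective depending on the access review checks. `required` and `missing` can work together, especially in the case where the cluster admin wants to show another perspective to users without specific permissions. At least one of `required` and `missing` should be non-empty.",
+	"required": "required defines a list of permission checks. The perspective will only be shown when all checks are successful. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the missing access review list.",
+	"missing": "missing defines a list of permission checks. 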
The perspective will only be shown when at least one check fails. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the required access review list.", +} + +func (ResourceAttributesAccessReview) SwaggerDoc() map[string]string { + return map_ResourceAttributesAccessReview +} + +var map_StatuspageProvider = map[string]string{ + "": "StatuspageProvider provides identity for statuspage account.", + "pageID": "pageID is the unique ID assigned by Statuspage for your page. This must be a public page.", +} + +func (StatuspageProvider) SwaggerDoc() map[string]string { + return map_StatuspageProvider +} + +var map_Theme = map[string]string{ + "": "Theme defines a theme mode for the console UI.", + "mode": "mode is used to specify what theme mode a logo will apply to in the console UI. mode is a required field that allows values of Dark and Light. When set to Dark, the logo file referenced in the 'file' field will be used when an end-user of the console UI enables the Dark mode. When set to Light, the logo file referenced in the 'file' field will be used when an end-user of the console UI enables the Light mode.", + "source": "source is used by the console to locate the specified file containing a custom logo. source is a required field that references a ConfigMap name and key that contains the custom logo file in the openshift-config namespace. You can create it with a command like: - 'oc create configmap custom-logos-config --namespace=openshift-config --from-file=/path/to/file' The ConfigMap key must include the file extension so that the console serves the file with the correct MIME type. The recommended file format for the Masthead and Favicon logos is SVG, but other file formats are allowed if supported by the browser. The logo image size must be less than 1 MB due to constraints on the ConfigMap size. For more information, see the documentation: https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/web_console/customizing-web-console#customizing-web-console", +} + +func (Theme) SwaggerDoc() map[string]string { + return map_Theme +} + +var map_AWSCSIDriverConfigSpec = map[string]string{ + "": "AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver.", + "kmsKeyARN": "kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key.", + "efsVolumeMetrics": "efsVolumeMetrics sets the configuration for collecting metrics from EFS volumes used by the EFS CSI Driver.", +} + +func (AWSCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_AWSCSIDriverConfigSpec +} + +var map_AWSEFSVolumeMetrics = map[string]string{ + "": "AWSEFSVolumeMetrics defines the configuration for volume metrics in the EFS CSI Driver.", + "state": "state defines the state of metric collection in the AWS EFS CSI Driver. This field is required and must be set to one of the following values: Disabled or RecursiveWalk. Disabled means no metrics collection will be performed. This is the default value. RecursiveWalk means the AWS EFS CSI Driver will recursively scan volumes to collect metrics. 
This process may result in high CPU and memory usage, depending on the volume size.",
+	"recursiveWalk": "recursiveWalk provides additional configuration for collecting volume metrics in the AWS EFS CSI Driver when the state is set to RecursiveWalk.",
+}
+
+func (AWSEFSVolumeMetrics) SwaggerDoc() map[string]string {
+	return map_AWSEFSVolumeMetrics
+}
+
+var map_AWSEFSVolumeMetricsRecursiveWalkConfig = map[string]string{
+	"": "AWSEFSVolumeMetricsRecursiveWalkConfig defines options for volume metrics in the EFS CSI Driver.",
+	"refreshPeriodMinutes": "refreshPeriodMinutes specifies the frequency, in minutes, at which volume metrics are refreshed. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 240. The valid range is from 1 to 43200 minutes (30 days).",
+	"fsRateLimit": "fsRateLimit defines the rate limit, in goroutines per file system, for processing volume metrics. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 5. The valid range is from 1 to 100 goroutines.",
+}
+
+func (AWSEFSVolumeMetricsRecursiveWalkConfig) SwaggerDoc() map[string]string {
+	return map_AWSEFSVolumeMetricsRecursiveWalkConfig
+}
+
+var map_AzureCSIDriverConfigSpec = map[string]string{
+	"": "AzureCSIDriverConfigSpec defines properties that can be configured for the Azure CSI driver.",
+	"diskEncryptionSet": "diskEncryptionSet sets the cluster default storage class to encrypt volumes with a customer-managed encryption set, rather than the default platform-managed keys.",
+}
+
+func (AzureCSIDriverConfigSpec) SwaggerDoc() map[string]string {
+	return map_AzureCSIDriverConfigSpec
+}
+
+var map_AzureDiskEncryptionSet = map[string]string{
+	"": "AzureDiskEncryptionSet defines the configuration for a disk encryption set.",
+	"subscriptionID": "subscriptionID defines the Azure subscription that contains the disk encryption set. The value should meet the following conditions: 1. It should be a 128-bit number. 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long. 3. It should be displayed in five groups separated by hyphens (-). 4. The first group should be 8 characters long. 5. The second, third, and fourth groups should be 4 characters long. 6. The fifth group should be 12 characters long. An example SubscriptionID: f2007bbf-f802-4a47-9336-cf7c6b89b378",
+	"resourceGroup": "resourceGroup defines the Azure resource group that contains the disk encryption set. The value should consist of only alphanumeric characters, underscores (_), parentheses, hyphens and periods. The value should not end in a period and be at most 90 characters in length.",
+	"name": "name is the name of the disk encryption set that will be set on the default storage class. The value should consist of only alphanumeric characters, underscores (_), hyphens, and be at most 80 characters in length.",
+}
+
+func (AzureDiskEncryptionSet) SwaggerDoc() map[string]string {
+	return map_AzureDiskEncryptionSet
+}
+
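The Azure fields above plug into the CSIDriverConfigSpec documented next. A minimal sketch of wiring a disk encryption set into a driver config, assuming the vendored operator/v1 types and the AzureDriverType constant match upstream openshift/api (the resource group and set name are placeholders):

package main

import (
	operatorv1 "github.com/openshift/api/operator/v1"
)

// driverConfig selects the Azure driver and points it at a customer-managed
// disk encryption set; the subscription ID reuses the example from the docs above.
var driverConfig = operatorv1.CSIDriverConfigSpec{
	DriverType: operatorv1.AzureDriverType,
	Azure: &operatorv1.AzureCSIDriverConfigSpec{
		DiskEncryptionSet: &operatorv1.AzureDiskEncryptionSet{
			SubscriptionID: "f2007bbf-f802-4a47-9336-cf7c6b89b378",
			ResourceGroup:  "my-resource-group",      // placeholder
			Name:           "my-disk-encryption-set", // placeholder
		},
	},
}

func main() { _ = driverConfig }

+var map_CSIDriverConfigSpec = map[string]string{
+	"": "CSIDriverConfigSpec defines configuration spec that can be used to optionally configure a specific CSI Driver.",
+	"driverType": "driverType indicates the type of CSI driver for which the driverConfig is being applied. Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted. 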
Consumers should treat unknown values as a NO-OP.", + "aws": "aws is used to configure the AWS CSI driver.", + "azure": "azure is used to configure the Azure CSI driver.", + "gcp": "gcp is used to configure the GCP CSI driver.", + "ibmcloud": "ibmcloud is used to configure the IBM Cloud CSI driver.", + "vSphere": "vSphere is used to configure the vsphere CSI driver.", +} + +func (CSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_CSIDriverConfigSpec +} + +var map_ClusterCSIDriver = map[string]string{ + "": "ClusterCSIDriver object allows management and configuration of a CSI driver operator installed by default in OpenShift. The name of the object must be the name of the CSI driver it operates. See the CSIDriverName type for the list of allowed values.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (ClusterCSIDriver) SwaggerDoc() map[string]string { + return map_ClusterCSIDriver +} + +var map_ClusterCSIDriverList = map[string]string{ + "": "ClusterCSIDriverList contains a list of ClusterCSIDriver\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ClusterCSIDriverList) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverList +} + +var map_ClusterCSIDriverSpec = map[string]string{ + "": "ClusterCSIDriverSpec is the desired behavior of the CSI driver operator", + "storageClassState": "storageClassState determines if the CSI operator should create and manage storage classes. If this field value is empty or Managed - the CSI operator will continuously reconcile the storage class and create it if necessary. If this field value is Unmanaged - the CSI operator will not reconcile any previously created storage class. If this field value is Removed - the CSI operator will delete the storage class it created previously. When omitted, this means the user has no opinion and the platform chooses a reasonable default, which is subject to change over time. The current default behaviour is Managed.", + "driverConfig": "driverConfig can be used to specify platform-specific driver configuration. When omitted, this means no opinion and the platform is left to choose reasonable defaults.
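A consumer-side sketch of the driverType contract just described, with unknown values treated as a no-op; the types here are simplified strings and applyDriverConfig is a hypothetical helper, not the generated openshift/api structs:

    package main

    import "fmt"

    // applyDriverConfig dispatches on the documented driverType values;
    // unknown or omitted values are deliberately a no-op.
    func applyDriverConfig(driverType string) {
        switch driverType {
        case "AWS":
            fmt.Println("apply aws stanza")
        case "Azure":
            fmt.Println("apply azure stanza")
        case "GCP":
            fmt.Println("apply gcp stanza")
        case "IBMCloud":
            fmt.Println("apply ibmcloud stanza")
        case "vSphere":
            fmt.Println("apply vSphere stanza")
        default:
            // Unknown or omitted: treat as a NO-OP per the documentation.
        }
    }

    func main() { applyDriverConfig("AWS") }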
These defaults are subject to change over time.", +} + +func (ClusterCSIDriverSpec) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverSpec +} + +var map_ClusterCSIDriverStatus = map[string]string{ + "": "ClusterCSIDriverStatus is the observed status of the CSI driver operator", +} + +func (ClusterCSIDriverStatus) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverStatus +} + +var map_GCPCSIDriverConfigSpec = map[string]string{ + "": "GCPCSIDriverConfigSpec defines properties that can be configured for the GCP CSI driver.", + "kmsKey": "kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied encryption keys, rather than the default keys managed by GCP.", +} + +func (GCPCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_GCPCSIDriverConfigSpec +} + +var map_GCPKMSKeyReference = map[string]string{ + "": "GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key", + "name": "name is the name of the customer-managed encryption key to be used for disk encryption. The value should correspond to an existing KMS key and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length.", + "keyRing": "keyRing is the name of the KMS Key Ring to which the KMS Key belongs. The value should correspond to an existing KMS key ring and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length.", + "projectID": "projectID is the ID of the Project in which the KMS Key Ring exists. It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited.", + "location": "location is the GCP location in which the Key Ring exists. The value must match an existing GCP location, or \"global\". Defaults to global if not set.", +} + +func (GCPKMSKeyReference) SwaggerDoc() map[string]string { + return map_GCPKMSKeyReference +} + +var map_IBMCloudCSIDriverConfigSpec = map[string]string{ + "": "IBMCloudCSIDriverConfigSpec defines the properties that can be configured for the IBM Cloud CSI driver.", + "encryptionKeyCRN": "encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use for disk encryption of volumes for the default storage classes.", +} + +func (IBMCloudCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_IBMCloudCSIDriverConfigSpec +} + +var map_VSphereCSIDriverConfigSpec = map[string]string{ + "": "VSphereCSIDriverConfigSpec defines properties that can be configured for the vSphere CSI driver.", + "topologyCategories": "topologyCategories indicates the tag categories with which vCenter resources such as the host cluster or datacenter were tagged. If the cluster Infrastructure object has a topology, the values specified in the Infrastructure object will be used and modifications to topologyCategories will be rejected.", + "globalMaxSnapshotsPerBlockVolume": "globalMaxSnapshotsPerBlockVolume is a global configuration parameter that applies to volumes on all kinds of datastores. If omitted, the platform chooses a default, which is subject to change over time; currently that default is 3. Snapshots cannot be disabled using this parameter.
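For orientation, the GCPKMSKeyReference fields above combine into GCP's fully qualified KMS key path, with location defaulting to "global" when unset. A minimal Go sketch (kmsKeyResourceName is a hypothetical helper, not the operator's actual code):

    package main

    import "fmt"

    // kmsKeyResourceName assembles the fully qualified GCP KMS key path
    // from the documented fields, defaulting location to "global".
    func kmsKeyResourceName(projectID, location, keyRing, name string) string {
        if location == "" {
            location = "global"
        }
        return fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s",
            projectID, location, keyRing, name)
    }

    func main() {
        fmt.Println(kmsKeyResourceName("my-project", "", "my-ring", "my-key"))
    }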
Increasing the number of snapshots above 3 can have a negative impact on performance; for more details see: https://kb.vmware.com/s/article/1025279 Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html", + "granularMaxSnapshotsPerBlockVolumeInVSAN": "granularMaxSnapshotsPerBlockVolumeInVSAN is a granular configuration parameter on vSAN datastore only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. Snapshots for VSAN cannot be disabled using this parameter.", + "granularMaxSnapshotsPerBlockVolumeInVVOL": "granularMaxSnapshotsPerBlockVolumeInVVOL is a granular configuration parameter on Virtual Volumes datastore only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. Snapshots for VVOL cannot be disabled using this parameter.", + "maxAllowedBlockVolumesPerNode": "maxAllowedBlockVolumesPerNode is an optional configuration parameter that allows setting a custom value for the limit of the number of PersistentVolumes attached to a node. In vSphere version 7 this limit was set to 59 by default; however, in vSphere version 8 this limit was increased to 255. Before increasing this value above 59, the cluster administrator needs to ensure that every node forming the cluster is updated to ESXi version 8 or higher and that all nodes are running the same version. The limit must be between 1 and 255, which matches the vSphere version 8 maximum. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 59, which matches the limit for vSphere version 7.", +} + +func (VSphereCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_VSphereCSIDriverConfigSpec +} + +var map_CSISnapshotController = map[string]string{ + "": "CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (CSISnapshotController) SwaggerDoc() map[string]string { + return map_CSISnapshotController +} + +var map_CSISnapshotControllerList = map[string]string{ + "": "CSISnapshotControllerList contains a list of CSISnapshotControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (CSISnapshotControllerList) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerList +} + +var map_CSISnapshotControllerSpec = map[string]string{ + "": "CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator.", +} + +func (CSISnapshotControllerSpec) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerSpec +} + +var map_CSISnapshotControllerStatus = map[string]string{ + "": "CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator.", +} + +func (CSISnapshotControllerStatus) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerStatus +} + +var map_DNS = map[string]string{ + "": "DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster.\n\nThis supports the DNS-based service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md\n\nMore details: https://kubernetes.io/docs/tasks/administer-cluster/coredns\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the DNS.", + "status": "status is the most recently observed status of the DNS.", +} + +func (DNS) SwaggerDoc() map[string]string { + return map_DNS +} + +var map_DNSCache = map[string]string{ + "": "DNSCache defines the fields for configuring DNS caching.", + "positiveTTL": "positiveTTL is optional and specifies the amount of time that a positive response should be cached.\n\nIf configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"100s\", \"1m30s\", \"12h30m10s\". Values that are fractions of a second are rounded down to the nearest second. If the configured value is less than 1s, the default value will be used. If not configured, the value will be 0s and OpenShift will use a default value of 900 seconds unless noted otherwise in the respective Corefile for your version of OpenShift. The default value of 900 seconds is subject to change.", + "negativeTTL": "negativeTTL is optional and specifies the amount of time that a negative response should be cached.\n\nIf configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"100s\", \"1m30s\", \"12h30m10s\". Values that are fractions of a second are rounded down to the nearest second. If the configured value is less than 1s, the default value will be used. If not configured, the value will be 0s and OpenShift will use a default value of 30 seconds unless noted otherwise in the respective Corefile for your version of OpenShift. 
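The TTL semantics described here (unsigned duration strings, fractions of a second rounded down, values under 1s replaced by the default) can be sketched in a few lines of Go; effectiveTTL is a hypothetical helper, not CoreDNS's actual implementation:

    package main

    import (
        "fmt"
        "time"
    )

    // effectiveTTL parses an unsigned duration string such as "1m30s",
    // rounds fractions of a second down, and falls back to the default
    // when the configured value is under 1s or unparseable.
    func effectiveTTL(configured string, def time.Duration) time.Duration {
        d, err := time.ParseDuration(configured)
        if err != nil || d < time.Second {
            return def
        }
        return d.Truncate(time.Second)
    }

    func main() {
        fmt.Println(effectiveTTL("1m30.7s", 900*time.Second)) // 1m30s
        fmt.Println(effectiveTTL("500ms", 900*time.Second))   // 15m0s (default)
    }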
The default value of 30 seconds is subject to change.", +} + +func (DNSCache) SwaggerDoc() map[string]string { + return map_DNSCache +} + +var map_DNSList = map[string]string{ + "": "DNSList contains a list of DNS\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (DNSList) SwaggerDoc() map[string]string { + return map_DNSList +} + +var map_DNSNodePlacement = map[string]string{ + "": "DNSNodePlacement describes the node scheduling configuration for DNS pods.", + "nodeSelector": "nodeSelector is the node selector applied to DNS pods.\n\nIf empty, the default is used, which is currently the following:\n\n kubernetes.io/os: linux\n\nThis default is subject to change.\n\nIf set, the specified selector is used and replaces the default.", + "tolerations": "tolerations is a list of tolerations applied to DNS pods.\n\nIf empty, the DNS operator sets a toleration for the \"node-role.kubernetes.io/master\" taint. This default is subject to change. Specifying tolerations without including a toleration for the \"node-role.kubernetes.io/master\" taint may be risky as it could lead to an outage if all worker nodes become unavailable.\n\nNote that the daemon controller adds some tolerations as well. See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/", +} + +func (DNSNodePlacement) SwaggerDoc() map[string]string { + return map_DNSNodePlacement +} + +var map_DNSOverTLSConfig = map[string]string{ + "": "DNSOverTLSConfig describes optional DNSTransportConfig fields that should be captured.", + "serverName": "serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is set to \"TLS\". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the TLS certificate installed in the upstream resolver(s).", + "caBundle": "caBundle references a ConfigMap that must contain either a single CA Certificate or a CA Bundle. This allows cluster administrators to provide their own CA or CA bundle for validating the certificate of upstream resolvers.\n\n1. The configmap must contain a `ca-bundle.crt` key. 2. The value must be a PEM encoded CA certificate or CA bundle. 3. The administrator must create this configmap in the openshift-config namespace. 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName.", +} + +func (DNSOverTLSConfig) SwaggerDoc() map[string]string { + return map_DNSOverTLSConfig +} + +var map_DNSSpec = map[string]string{ + "": "DNSSpec is the specification of the desired behavior of the DNS.", + "servers": "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. 
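The servers field introduced here selects a Server by longest suffix match on the query name, as the continuation of this documentation spells out with the foo.com example. A minimal Go sketch of that selection rule (pickZone is a hypothetical helper, standard library only):

    package main

    import (
        "fmt"
        "strings"
    )

    // pickZone returns the longest zone that the query name matches,
    // where a match means the name equals the zone or ends with "."+zone.
    func pickZone(name string, zones []string) string {
        best := ""
        for _, z := range zones {
            if (name == z || strings.HasSuffix(name, "."+z)) && len(z) > len(best) {
                best = z
            }
        }
        return best
    }

    func main() {
        fmt.Println(pickZone("www.a.foo.com", []string{"foo.com", "a.foo.com"})) // a.foo.com
    }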
If servers consists of more than one Server, the longest suffix match will be used to determine the Server.\n\nFor example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\".\n\nIf this field is nil, no servers are created.", + "upstreamResolvers": "upstreamResolvers defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers for the case of the default (\".\") server.\n\nIf this field is not specified, the upstream used will default to /etc/resolv.conf, with policy \"sequential\".", + "nodePlacement": "nodePlacement provides explicit control over the scheduling of DNS pods.\n\nGenerally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint.\n\nIf unset, defaults are used. See nodePlacement for more details.", + "managementState": "managementState indicates whether the DNS operator should manage cluster DNS", + "operatorLogLevel": "operatorLogLevel controls the logging level of the DNS Operator. Valid values are: \"Normal\", \"Debug\", \"Trace\". Defaults to \"Normal\". Setting operatorLogLevel: Trace will produce extremely verbose logs.", + "logLevel": "logLevel describes the desired logging verbosity for CoreDNS. Any one of the following values may be specified: * Normal logs errors from upstream resolvers. * Debug logs errors, NXDOMAIN responses, and NODATA responses. * Trace logs errors and all responses.\n Setting logLevel: Trace will produce extremely verbose logs.\nValid values are: \"Normal\", \"Debug\", \"Trace\". Defaults to \"Normal\".", + "cache": "cache describes the caching configuration that applies to all server blocks listed in the Corefile. This field allows a cluster admin to optionally configure: * positiveTTL, which is a duration for which positive responses should be cached. * negativeTTL, which is a duration for which negative responses should be cached. If this is not configured, OpenShift will configure positive and negative caching with a default value that is subject to change. At the time of writing, the default positiveTTL is 900 seconds and the default negativeTTL is 30 seconds or as noted in the respective Corefile for your version of OpenShift.", +} + +func (DNSSpec) SwaggerDoc() map[string]string { + return map_DNSSpec +} + +var map_DNSStatus = map[string]string{ + "": "DNSStatus defines the observed status of the DNS.", + "clusterIP": "clusterIP is the service IP through which this DNS is made available.\n\nIn the case of the default DNS, this will be a well-known IP that is used as the default nameserver for pods that are using the default ClusterFirst DNS policy.\n\nIn general, this IP can be specified in a pod's spec.dnsConfig.nameservers list or used explicitly when performing name resolution from within the cluster.
Example: dig foo.com @<service IP>\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "clusterDomain": "clusterDomain is the local cluster DNS domain suffix for DNS services. This will be a subdomain as defined in RFC 1034, section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: \"cluster.local\"\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service", + "conditions": "conditions provide information about the state of the DNS on the cluster.\n\nThese are the supported DNS conditions:\n\n * Available\n - True if the following conditions are met:\n * DNS controller daemonset is available.\n - False if any of those conditions are unsatisfied.", +} + +func (DNSStatus) SwaggerDoc() map[string]string { + return map_DNSStatus +} + +var map_DNSTransportConfig = map[string]string{ + "": "DNSTransportConfig groups related configuration parameters used for configuring forwarding to upstream resolvers that support DNS-over-TLS.", + "transport": "transport allows cluster administrators to opt in to using a DNS-over-TLS connection between cluster DNS and an upstream resolver(s). Configuring TLS as the transport at this level without configuring a CABundle will result in the system certificates being used to verify the serving certificate of the upstream resolver(s).\n\nPossible values: \"\" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject to change over time. The current default is \"Cleartext\". \"Cleartext\" - Cluster admin specified cleartext option. This results in the same functionality as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, or wants to switch from \"TLS\" to \"Cleartext\" explicitly. \"TLS\" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, you MUST also set ServerName. If a port is not included with the upstream IP, port 853 will be tried by default per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1.", + "tls": "tls contains the additional configuration options to use when Transport is set to \"TLS\".", +} + +func (DNSTransportConfig) SwaggerDoc() map[string]string { + return map_DNSTransportConfig +} + +var map_ForwardPlugin = map[string]string{ + "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", + "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", + "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query.
* \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Random\"", + "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", + "protocolStrategy": "protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are \"TCP\" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. \"TCP\" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. \"TCP\" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS.", +} + +func (ForwardPlugin) SwaggerDoc() map[string]string { + return map_ForwardPlugin +} + +var map_Server = map[string]string{ + "": "Server defines the schema for a server that runs per instance of CoreDNS.", + "name": "name is required and specifies a unique name for the server. Name must comply with the Service Name Syntax of rfc6335.", + "zones": "zones is required and specifies the subdomains that Server is authoritative for. Zones must conform to the rfc1123 definition of a subdomain. Specifying the cluster domain (i.e., \"cluster.local\") is invalid.", + "forwardPlugin": "forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers.", +} + +func (Server) SwaggerDoc() map[string]string { + return map_Server +} + +var map_Upstream = map[string]string{ + "": "Upstream can either be of type SystemResolvConf, or of type Network.\n\n - For an Upstream of type SystemResolvConf, no further fields are necessary:\n The upstream will be configured to use /etc/resolv.conf.\n - For an Upstream of type Network, a NetworkResolver field needs to be defined\n with an IP address or IP:port if the upstream listens on a port other than 53.", + "type": "type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network.\n\n* When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:\n /etc/resolv.conf will be used\n* When Network is used, the Upstream structure must contain at least an Address", + "address": "address must be defined when Type is set to Network. It will be ignored otherwise. It must be a valid ipv4 or ipv6 address.", + "port": "port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 65535", +} + +func (Upstream) SwaggerDoc() map[string]string { + return map_Upstream +} + +var map_UpstreamResolvers = map[string]string{ + "": "UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the specific case of the default (\".\") server. 
It differs from ForwardPlugin in the default values it accepts: * At least one upstream should be specified. * The default policy is Sequential.", + "upstreams": "upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default.", + "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\".", + "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", + "protocolStrategy": "protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are \"TCP\" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. \"TCP\" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. \"TCP\" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS.", +} + +func (UpstreamResolvers) SwaggerDoc() map[string]string { + return map_UpstreamResolvers +} + +var map_Etcd = map[string]string{ + "": "Etcd provides information to configure an operator to manage etcd.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (Etcd) SwaggerDoc() map[string]string { + return map_Etcd +} + +var map_EtcdList = map[string]string{ + "": "EtcdList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (EtcdList) SwaggerDoc() map[string]string { + return map_EtcdList +} + +var map_EtcdSpec = map[string]string{ + "controlPlaneHardwareSpeed": "HardwareSpeed allows a user to change the etcd tuning profile, which configures the latency parameters for the heartbeat interval and leader election timeouts, allowing the cluster to tolerate longer round-trip times between etcd members. Valid values are \"\", \"Standard\" and \"Slower\".\n\t\"\" means no opinion and the platform is left to choose a reasonable default\n\twhich is subject to change without notice.", + "backendQuotaGiB": "backendQuotaGiB sets the etcd backend storage size limit in gibibytes. The value should be an integer not less than 8 and not more than 32. When not specified, the default value is 8.", +} + +func (EtcdSpec) SwaggerDoc() map[string]string { + return map_EtcdSpec +} + +var map_AWSClassicLoadBalancerParameters = map[string]string{ + "": "AWSClassicLoadBalancerParameters holds configuration parameters for an AWS Classic load balancer.", + "connectionIdleTimeout": "connectionIdleTimeout specifies the maximum time period that a connection may be idle before the load balancer closes the connection. The value must be parseable as a time duration value; see Go's time.ParseDuration. A nil or zero value means no opinion, in which case a default value is used. The default value for this field is 60s. This default is subject to change.", + "subnets": "subnets specifies the subnets to which the load balancer will attach. The subnets may be specified by either their ID or name. The total number of subnets is limited to 10.\n\nIn order for the load balancer to be provisioned with subnets, each subnet must exist, each subnet must be from a different availability zone, and the load balancer service must be recreated to pick up new values.\n\nWhen omitted from the spec, the subnets will be auto-discovered for each availability zone. Auto-discovered subnets are not reported in the status of the IngressController object.", +} + +func (AWSClassicLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_AWSClassicLoadBalancerParameters +} + +var map_AWSLoadBalancerParameters = map[string]string{ + "": "AWSLoadBalancerParameters provides configuration settings that are specific to AWS load balancers.", + "type": "type is the type of AWS load balancer to instantiate for an ingresscontroller.\n\nValid values are:\n\n* \"Classic\": A Classic Load Balancer that makes routing decisions at either\n the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See\n the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb\n\n* \"NLB\": A Network Load Balancer that makes routing decisions at the\n transport layer (TCP/SSL). See the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb", + "classicLoadBalancer": "classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic.", + "networkLoadBalancer": "networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer.
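Looking back at the EtcdSpec fields above, the documented constraints (hardware speed limited to "", "Standard", or "Slower"; backendQuotaGiB within 8 to 32) reduce to a short check. An illustrative Go sketch, not the operator's actual validation:

    package main

    import "fmt"

    // validateEtcdTuning checks the documented EtcdSpec constraints.
    func validateEtcdTuning(speed string, quotaGiB int32) error {
        switch speed {
        case "", "Standard", "Slower":
        default:
            return fmt.Errorf("invalid hardware speed %q", speed)
        }
        if quotaGiB < 8 || quotaGiB > 32 {
            return fmt.Errorf("backendQuotaGiB %d out of range 8-32", quotaGiB)
        }
        return nil
    }

    func main() {
        fmt.Println(validateEtcdTuning("Slower", 16)) // <nil>
        fmt.Println(validateEtcdTuning("Fast", 8))    // error
    }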
Present only if type is NLB.", +} + +func (AWSLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_AWSLoadBalancerParameters +} + +var map_AWSNetworkLoadBalancerParameters = map[string]string{ + "": "AWSNetworkLoadBalancerParameters holds configuration parameters for an AWS Network load balancer. For example: setting AWS EIPs https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html", + "subnets": "subnets specifies the subnets to which the load balancer will attach. The subnets may be specified by either their ID or name. The total number of subnets is limited to 10.\n\nIn order for the load balancer to be provisioned with subnets, each subnet must exist, each subnet must be from a different availability zone, and the load balancer service must be recreated to pick up new values.\n\nWhen omitted from the spec, the subnets will be auto-discovered for each availability zone. Auto-discovered subnets are not reported in the status of the IngressController object.", + "eipAllocations": "eipAllocations is a list of IDs for Elastic IP (EIP) addresses that are assigned to the Network Load Balancer. The following restrictions apply:\n\neipAllocations can only be used with external scope, not internal. An EIP can be allocated to only a single IngressController. The number of EIP allocations must match the number of subnets that are used for the load balancer. Each EIP allocation must be unique. A maximum of 10 EIP allocations are permitted.\n\nSee https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html for general information about configuration, characteristics, and limitations of Elastic IP addresses.", +} + +func (AWSNetworkLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_AWSNetworkLoadBalancerParameters +} + +var map_AWSSubnets = map[string]string{ + "": "AWSSubnets contains a list of references to AWS subnets by ID or name.", + "ids": "ids specifies a list of AWS subnets by subnet ID. Subnet IDs must start with \"subnet-\", consist only of alphanumeric characters, must be exactly 24 characters long, must be unique, and the total number of subnets specified by ids and names must not exceed 10.", + "names": "names specifies a list of AWS subnets by subnet name. Subnet names must not start with \"subnet-\", must not include commas, must be under 256 characters in length, must be unique, and the total number of subnets specified by ids and names must not exceed 10.", +} + +func (AWSSubnets) SwaggerDoc() map[string]string { + return map_AWSSubnets +} + +var map_AccessLogging = map[string]string{ + "": "AccessLogging describes how client requests should be logged.", + "destination": "destination is where access logs go.", + "httpLogFormat": "httpLogFormat specifies the format of the log message for an HTTP request.\n\nIf this field is empty, log messages use the implementation's default HTTP log format. For HAProxy's default HTTP log format, see the HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3\n\nNote that this format only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). It does not affect the log format for TLS passthrough connections.", + "httpCaptureHeaders": "httpCaptureHeaders defines HTTP headers that should be captured in access logs.
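The AWSSubnets rules above (ID shape, name restrictions, uniqueness, and a combined cap of 10) can be expressed compactly; the following Go sketch is illustrative, with validateAWSSubnets as a hypothetical helper rather than the API's actual CRD validation:

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // A subnet ID starts with "subnet-", is alphanumeric, and is exactly
    // 24 characters long (17 after the prefix), per the docs above.
    var subnetIDRe = regexp.MustCompile(`^subnet-[0-9a-zA-Z]{17}$`)

    func validateAWSSubnets(ids, names []string) error {
        if len(ids)+len(names) > 10 {
            return fmt.Errorf("at most 10 subnets may be specified")
        }
        seen := map[string]bool{}
        for _, id := range ids {
            if !subnetIDRe.MatchString(id) || seen[id] {
                return fmt.Errorf("invalid or duplicate subnet ID %q", id)
            }
            seen[id] = true
        }
        for _, n := range names {
            if n == "" || strings.HasPrefix(n, "subnet-") ||
                strings.Contains(n, ",") || len(n) >= 256 || seen[n] {
                return fmt.Errorf("invalid or duplicate subnet name %q", n)
            }
            seen[n] = true
        }
        return nil
    }

    func main() {
        fmt.Println(validateAWSSubnets([]string{"subnet-0123456789abcdef0"}, []string{"private-a"}))
    }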
If this field is empty, no headers are captured.\n\nNote that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be captured for TLS passthrough connections.", + "httpCaptureCookies": "httpCaptureCookies specifies HTTP cookies that should be captured in access logs. If this field is empty, no cookies are captured.", + "logEmptyRequests": "logEmptyRequests specifies how connections on which no request is received should be logged. Typically, these empty requests come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\"), in which case logging these requests may be undesirable. However, these requests may also be caused by network errors, in which case logging empty requests may be useful for diagnosing the errors. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts. Allowed values for this field are \"Log\" and \"Ignore\". The default value is \"Log\".", +} + +func (AccessLogging) SwaggerDoc() map[string]string { + return map_AccessLogging +} + +var map_ClientTLS = map[string]string{ + "": "ClientTLS specifies TLS configuration to enable client-to-server authentication, which can be used for mutual TLS.", + "clientCertificatePolicy": "clientCertificatePolicy specifies whether the ingress controller requires clients to provide certificates. This field accepts the values \"Required\" or \"Optional\".\n\nNote that the ingress controller only checks client certificates for edge-terminated and reencrypt TLS routes; it cannot check certificates for cleartext HTTP or passthrough TLS routes.", + "clientCA": "clientCA specifies a configmap containing the PEM-encoded CA certificate bundle that should be used to verify a client's certificate. The administrator must create this configmap in the openshift-config namespace.", + "allowedSubjectPatterns": "allowedSubjectPatterns specifies a list of regular expressions that should be matched against the distinguished name on a valid client certificate to filter requests. The regular expressions must use PCRE syntax. If this list is empty, no filtering is performed. If the list is nonempty, then at least one pattern must match a client certificate's distinguished name or else the ingress controller rejects the certificate and denies the connection.", +} + +func (ClientTLS) SwaggerDoc() map[string]string { + return map_ClientTLS +} + +var map_ContainerLoggingDestinationParameters = map[string]string{ + "": "ContainerLoggingDestinationParameters describes parameters for the Container logging destination type.", + "maxLength": "maxLength is the maximum length of the log message.\n\nValid values are integers in the range 480 to 8192, inclusive.\n\nWhen omitted, the default value is 1024.", +} + +func (ContainerLoggingDestinationParameters) SwaggerDoc() map[string]string { + return map_ContainerLoggingDestinationParameters +} + +var map_EndpointPublishingStrategy = map[string]string{ + "": "EndpointPublishingStrategy is a way to publish the endpoints of an IngressController, and represents the type and any additional configuration for a specific type.", + "type": "type is the publishing strategy to use. 
Valid values are:\n\n* LoadBalancerService\n\nPublishes the ingress controller using a Kubernetes LoadBalancer Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment.\n\nSee: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n\nIf domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone.\n\nWildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms.\n\n* HostNetwork\n\nPublishes the ingress controller on node ports where the ingress controller is deployed.\n\nIn this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports.\n\n* Private\n\nDoes not publish the ingress controller.\n\nIn this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller.\n\n* NodePortService\n\nPublishes the ingress controller using a Kubernetes NodePort Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will be preserved.", + "loadBalancer": "loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService.", + "hostNetwork": "hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork.", + "private": "private holds parameters for the Private endpoint publishing strategy. Present only if type is Private.", + "nodePort": "nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService.", +} + +func (EndpointPublishingStrategy) SwaggerDoc() map[string]string { + return map_EndpointPublishingStrategy +} + +var map_GCPLoadBalancerParameters = map[string]string{ + "": "GCPLoadBalancerParameters provides configuration settings that are specific to GCP load balancers.", + "clientAccess": "clientAccess describes how client access is restricted for internal load balancers.\n\nValid values are: * \"Global\": Specifying an internal load balancer with Global client access\n allows clients from any region within the VPC to communicate with the load\n balancer.\n\n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access\n\n* \"Local\": Specifying an internal load balancer with Local client access\n means only clients within the same region (and VPC) as the GCP load balancer\n can communicate with the load balancer.
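Returning to the ClientTLS allowedSubjectPatterns field documented earlier: with a nonempty list, at least one pattern must match the client certificate's distinguished name. A Go sketch of that check; note the field specifies PCRE syntax while Go's regexp package implements RE2, so this illustration covers only the common subset and is not the router's actual implementation:

    package main

    import (
        "fmt"
        "regexp"
    )

    // subjectAllowed reports whether the distinguished name matches at
    // least one pattern; an empty list means no filtering.
    func subjectAllowed(subjectDN string, patterns []string) bool {
        if len(patterns) == 0 {
            return true
        }
        for _, p := range patterns {
            if re, err := regexp.Compile(p); err == nil && re.MatchString(subjectDN) {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(subjectAllowed("CN=client,O=example", []string{`O=example$`})) // true
    }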
Note that this is the default behavior.\n\n https://cloud.google.com/load-balancing/docs/internal#client_access", +} + +func (GCPLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_GCPLoadBalancerParameters +} + +var map_HTTPCompressionPolicy = map[string]string{ + "": "httpCompressionPolicy turns on compression for the specified MIME types.\n\nThis field is optional, and its absence implies that compression should not be enabled globally in HAProxy.\n\nIf httpCompressionPolicy exists, compression should be enabled only for the specified MIME types.", + "mimeTypes": "mimeTypes is a list of MIME types that should have compression applied. This list can be empty, in which case the ingress controller does not apply compression.\n\nNote: Not all MIME types benefit from compression, but HAProxy will still use resources to try to compress if instructed to. Generally speaking, text (html, css, js, etc.) formats benefit from compression, but formats that are already compressed (image, audio, video, etc.) benefit little in exchange for the time and CPU spent on compressing again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2", +} + +func (HTTPCompressionPolicy) SwaggerDoc() map[string]string { + return map_HTTPCompressionPolicy +} + +var map_HostNetworkStrategy = map[string]string{ + "": "HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing strategy.", + "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", + "httpPort": "httpPort is the port on the host which should be used to listen for HTTP requests. This field should be set when port 80 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified, it defaults to 80.", + "httpsPort": "httpsPort is the port on the host which should be used to listen for HTTPS requests. This field should be set when port 443 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified, it defaults to 443.", + "statsPort": "statsPort is the port on the host where the stats from the router are published. The value should not coincide with the NodePort range of the cluster. If an external load balancer is configured to forward connections to this IngressController, the load balancer should use this port for health checks. The load balancer can send HTTP probes on this port on a given node, with the path /healthz/ready to determine if the ingress controller is ready to receive traffic on the node.
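The statsPort health-check contract described here can be exercised with a plain HTTP probe. A minimal Go sketch (probeReady is a hypothetical helper; a real load balancer's probe configuration replaces this):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probeReady issues an HTTP GET against /healthz/ready on the stats
    // port with a 5-second timeout, treating HTTP 200 as ready.
    func probeReady(node string, statsPort int) bool {
        client := &http.Client{Timeout: 5 * time.Second}
        resp, err := client.Get(fmt.Sprintf("http://%s:%d/healthz/ready", node, statsPort))
        if err != nil {
            return false
        }
        defer resp.Body.Close()
        return resp.StatusCode == http.StatusOK
    }

    func main() {
        fmt.Println(probeReady("10.0.0.7", 1936))
    }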
For proper operation, the load balancer must not forward traffic to a node until the health check reports ready. The load balancer should also stop forwarding requests within a maximum of 45 seconds after /healthz/ready starts reporting not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with a threshold of two successful or failed requests to become healthy or unhealthy respectively, is a well-tested configuration. When the value is 0 or is not specified, it defaults to 1936.", +} + +func (HostNetworkStrategy) SwaggerDoc() map[string]string { + return map_HostNetworkStrategy +} + +var map_IBMLoadBalancerParameters = map[string]string{ + "": "IBMLoadBalancerParameters provides configuration settings that are specific to IBM Cloud load balancers.", + "protocol": "protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. See \"service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: \"proxy-protocol\"\" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nValid values for protocol are TCP, PROXY and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled.", +} + +func (IBMLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_IBMLoadBalancerParameters +} + +var map_IngressController = map[string]string{ + "": "IngressController describes a managed ingress controller for the cluster. The controller can service OpenShift Route and Kubernetes Ingress resources.\n\nWhen an IngressController is created, a new ingress controller deployment is created to allow external traffic to reach the services that expose Ingress or Route resources. Updating this resource may lead to disruption for public-facing network connections as a new ingress controller revision may be rolled out.\n\nhttps://kubernetes.io/docs/concepts/services-networking/ingress-controllers\n\nWhenever possible, sensible defaults for the platform are used. See each field for more details.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the IngressController.", + "status": "status is the most recently observed status of the IngressController.", +} + +func (IngressController) SwaggerDoc() map[string]string { + return map_IngressController +} + +var map_IngressControllerCaptureHTTPCookie = map[string]string{ + "": "IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be captured.", + "maxLength": "maxLength specifies a maximum length of the string that will be logged, which includes the cookie name, cookie value, and one-character delimiter. If the log entry exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.", +} + +func (IngressControllerCaptureHTTPCookie) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPCookie +} + +var map_IngressControllerCaptureHTTPCookieUnion = map[string]string{ + "": "IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured.", + "matchType": "matchType specifies the type of match to be performed on the cookie name. Allowed values are \"Exact\" for an exact string match and \"Prefix\" for a string prefix match. If \"Exact\" is specified, a name must be specified in the name field. If \"Prefix\" is provided, a prefix must be specified in the namePrefix field. For example, specifying matchType \"Prefix\" and namePrefix \"foo\" will capture a cookie named \"foo\" or \"foobar\" but not one named \"bar\". The first matching cookie is captured.", + "name": "name specifies a cookie name. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.", + "namePrefix": "namePrefix specifies a cookie name prefix. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.", +} + +func (IngressControllerCaptureHTTPCookieUnion) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPCookieUnion +} + +var map_IngressControllerCaptureHTTPHeader = map[string]string{ + "": "IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured.", + "name": "name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2.", + "maxLength": "maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. 
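The cookie-capture semantics above (Exact matches the name verbatim, Prefix matches a name prefix, the first match wins, and the logged name=value string is truncated to maxLength) might be sketched in Go as follows; captureCookie is a hypothetical helper, not the router's implementation:

    package main

    import (
        "fmt"
        "strings"
    )

    type cookie struct{ name, value string }

    // captureCookie returns the log entry for the first cookie matching
    // the configured matchType ("Exact" or "Prefix"), truncated to maxLength.
    func captureCookie(cookies []cookie, matchType, nameOrPrefix string, maxLength int) string {
        for _, c := range cookies {
            match := (matchType == "Exact" && c.name == nameOrPrefix) ||
                (matchType == "Prefix" && strings.HasPrefix(c.name, nameOrPrefix))
            if !match {
                continue
            }
            entry := c.name + "=" + c.value
            if len(entry) > maxLength {
                entry = entry[:maxLength]
            }
            return entry
        }
        return ""
    }

    func main() {
        cs := []cookie{{"bar", "x"}, {"foobar", "y"}}
        fmt.Println(captureCookie(cs, "Prefix", "foo", 32)) // foobar=y
    }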
Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.", +} + +func (IngressControllerCaptureHTTPHeader) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPHeader +} + +var map_IngressControllerCaptureHTTPHeaders = map[string]string{ + "": "IngressControllerCaptureHTTPHeaders specifies which HTTP headers the IngressController captures.", + "request": "request specifies which HTTP request headers to capture.\n\nIf this field is empty, no request headers are captured.", + "response": "response specifies which HTTP response headers to capture.\n\nIf this field is empty, no response headers are captured.", +} + +func (IngressControllerCaptureHTTPHeaders) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPHeaders +} + +var map_IngressControllerHTTPHeader = map[string]string{ + "": "IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header.", + "name": "name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, \"-!#$%&'*+.^_`\". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.", + "action": "action specifies actions to perform on headers, such as setting or deleting headers.", +} + +func (IngressControllerHTTPHeader) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeader +} + +var map_IngressControllerHTTPHeaderActionUnion = map[string]string{ + "": "IngressControllerHTTPHeaderActionUnion specifies an action to take on an HTTP header.", + "type": "type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers.", + "set": "set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise.", +} + +func (IngressControllerHTTPHeaderActionUnion) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeaderActionUnion +} + +var map_IngressControllerHTTPHeaderActions = map[string]string{ + "": "IngressControllerHTTPHeaderActions defines configuration for actions on HTTP request and response headers.", + "response": "response is a list of HTTP response headers to modify. Actions defined here will modify the response headers of all requests passing through an ingress controller. These actions are applied to all Routes, i.e., for all connections handled by the ingress controller defined within a cluster. IngressController actions for response headers will be executed after Route actions. Currently, actions may be defined to either `Set` or `Delete` header values. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. Sample fetchers allowed are \"res.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[res.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\".", + "request": "request is a list of HTTP request headers to modify. Actions defined here will modify the request headers of all requests passing through an ingress controller. These actions are applied to all Routes, i.e.,
for all connections handled by the ingress controller defined within a cluster. IngressController actions for request headers will be executed before Route actions. Currently, actions may be defined to either `Set` or `Delete` header values. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. Sample fetchers allowed are \"req.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[req.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\".", +} + +func (IngressControllerHTTPHeaderActions) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeaderActions +} + +var map_IngressControllerHTTPHeaders = map[string]string{ + "": "IngressControllerHTTPHeaders specifies how the IngressController handles certain HTTP headers.", + "forwardedHeaderPolicy": "forwardedHeaderPolicy specifies when and how the IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version HTTP headers. The value may be one of the following:\n\n* \"Append\", which specifies that the IngressController appends the\n headers, preserving existing headers.\n\n* \"Replace\", which specifies that the IngressController sets the\n headers, replacing any existing Forwarded or X-Forwarded-* headers.\n\n* \"IfNone\", which specifies that the IngressController sets the\n headers if they are not already set.\n\n* \"Never\", which specifies that the IngressController never sets the\n headers, preserving any existing headers.\n\nBy default, the policy is \"Append\".", + "uniqueId": "uniqueId describes configuration for a custom HTTP header that the ingress controller should inject into incoming HTTP requests. Typically, this header is configured to have a value that is unique to the HTTP request. The header can be used by applications or included in access logs to facilitate tracing individual HTTP requests.\n\nIf this field is empty, no such header is injected into requests.", + "headerNameCaseAdjustments": "headerNameCaseAdjustments specifies case adjustments that can be applied to HTTP header names. Each adjustment is specified as an HTTP header name with the desired capitalization. For example, specifying \"X-Forwarded-For\" indicates that the \"x-forwarded-for\" HTTP header should be adjusted to have the specified capitalization.\n\nThese adjustments are only applied to cleartext, edge-terminated, and re-encrypt routes, and only when using HTTP/1.\n\nFor request headers, these adjustments are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true annotation. For response headers, these adjustments are applied to all HTTP responses.\n\nIf this field is empty, no request headers are adjusted.", + "actions": "actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the \"haproxy.router.openshift.io/hsts_header\" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies.
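The request and response action lists described above apply in order, each entry either setting or deleting a header. A simplified Go sketch over net/http's Header type (the headerAction struct is a stand-in for the API's action union, and HAProxy sample-fetcher interpolation is out of scope for this illustration):

    package main

    import (
        "fmt"
        "net/http"
    )

    // headerAction mirrors the documented action union in simplified form:
    // Type is "Set" or "Delete"; Value is used only for Set.
    type headerAction struct {
        Name, Type, Value string
    }

    // applyActions runs the actions in list order, setting or deleting
    // headers as the documentation describes.
    func applyActions(h http.Header, actions []headerAction) {
        for _, a := range actions {
            switch a.Type {
            case "Set":
                h.Set(a.Name, a.Value)
            case "Delete":
                h.Del(a.Name)
            }
        }
    }

    func main() {
        h := http.Header{"X-Debug": []string{"1"}}
        applyActions(h, []headerAction{
            {Name: "X-Frame-Options", Type: "Set", Value: "DENY"},
            {Name: "X-Debug", Type: "Delete"},
        })
        fmt.Println(h)
    }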
Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController's spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route's spec.httpHeaders.actions field. Headers set using this API cannot be captured for use in access logs. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details.", +} + +func (IngressControllerHTTPHeaders) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeaders +} + +var map_IngressControllerHTTPUniqueIdHeaderPolicy = map[string]string{ + "": "IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a unique id header.", + "name": "name specifies the name of the HTTP header (for example, \"unique-id\") that the ingress controller should inject into HTTP requests. The field's value must be a valid HTTP header name as defined in RFC 2616 section 4.2. If the field is empty, no header is injected.", + "format": "format specifies the format for the injected HTTP header's value. This field has no effect unless name is specified. For the HAProxy-based ingress controller implementation, this format uses the same syntax as the HTTP log format. If the field is empty, the default value is \"%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid\"; see the corresponding HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3", +} + +func (IngressControllerHTTPUniqueIdHeaderPolicy) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPUniqueIdHeaderPolicy +} + +var map_IngressControllerList = map[string]string{ + "": "IngressControllerList contains a list of IngressControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (IngressControllerList) SwaggerDoc() map[string]string { + return map_IngressControllerList +} + +var map_IngressControllerLogging = map[string]string{ + "": "IngressControllerLogging describes what should be logged where.", + "access": "access describes how the client requests should be logged.\n\nIf this field is empty, access logging is disabled.", +} + +func (IngressControllerLogging) SwaggerDoc() map[string]string { + return map_IngressControllerLogging +} + +var map_IngressControllerSetHTTPHeader = map[string]string{ + "": "IngressControllerSetHTTPHeader defines the value which needs to be set on an HTTP header.", + "value": "value specifies a header value. Dynamic values can be added. 
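To make the Set/Delete action union and the HAProxy-format set.value concrete, a sketch in the same plain-map style follows; the header names are hypothetical, while the dynamic value uses the documented \"req.hdr\" fetcher and \"lower\" converter:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// spec.httpHeaders.actions fragment: one Set action with a dynamic value
	// and one Delete action, applied in list order.
	actions := map[string]any{
		"request": []any{
			map[string]any{
				"name": "X-Target", // hypothetical header name
				"action": map[string]any{
					"type": "Set",
					// "set" is required when type is Set and forbidden otherwise.
					"set": map[string]any{"value": "%[req.hdr(X-target),lower]"},
				},
			},
			map[string]any{
				"name":   "X-Debug", // hypothetical header name
				"action": map[string]any{"type": "Delete"},
			},
		},
	}
	out, _ := json.MarshalIndent(actions, "", "  ")
	fmt.Println(string(out))
}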
The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController.", +} + +func (IngressControllerSetHTTPHeader) SwaggerDoc() map[string]string { + return map_IngressControllerSetHTTPHeader +} + +var map_IngressControllerSpec = map[string]string{ + "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", + "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", + "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-<error code>.http\", where <error code> is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. E.g. https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http. If this field is empty, the ingress controller uses the default error pages.", + "replicas": "replicas is the desired number of ingress controller replicas. If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nThe value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers, the chosen field will be infrastructureTopology.
Replicas will then be set to 1 or 2 based on whether the chosen field's value is SingleReplica or HighlyAvailable, respectively.\n\nThese defaults are subject to change.", + "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", + "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", + "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. See NodePlacement for more details.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.", + "clientTLS": "clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes.", + "routeAdmission": "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces).\n\nIf empty, defaults will be applied. See specific routeAdmission fields for details about their defaults.", + "logging": "logging defines parameters for what should be logged where.
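The namespaceSelector and routeSelector fields above are ordinary label selectors; a minimal sharding sketch in the same plain-map style (domain and label values hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// IngressController spec fragment implementing a shard: only namespaces
	// and Routes carrying the matching labels are serviced.
	spec := map[string]any{
		"domain": "apps-internal.example.com", // hypothetical shard domain
		"namespaceSelector": map[string]any{
			"matchLabels": map[string]any{"environment": "internal"},
		},
		"routeSelector": map[string]any{
			"matchLabels": map[string]any{"type": "sharded"},
		},
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}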
If this field is empty, operational logs are enabled but access logs are disabled.", + "httpHeaders": "httpHeaders defines policy for HTTP headers.\n\nIf this field is empty, the default values are used.", + "httpEmptyRequestsPolicy": "httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are \"Respond\" and \"Ignore\". If the field is set to \"Respond\", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to \"Ignore\", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. The default value is \"Respond\".\n\nTypically, these connections come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to \"Ignore\" may impede detection and diagnosis of problems. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts.", + "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.", + "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.", + "httpCompression": "httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression.", + "idleConnectionTerminationPolicy": "idleConnectionTerminationPolicy maps directly to HAProxy's idle-close-on-response option and controls whether HAProxy keeps idle frontend connections open during a soft stop (router reload).\n\nAllowed values for this field are \"Immediate\" and \"Deferred\". The default value is \"Immediate\".\n\nWhen set to \"Immediate\", idle connections are closed immediately during router reloads. This ensures immediate propagation of route changes but may impact clients sensitive to connection resets.\n\nWhen set to \"Deferred\", HAProxy will maintain idle connections during a soft reload instead of closing them immediately. These connections remain open until any of the following occurs:\n\n - A new request is received on the connection, in which\n case HAProxy handles it in the old process and closes\n the connection after sending the response.\n\n - HAProxy's `timeout http-keep-alive` duration expires\n (300 seconds in OpenShift's configuration, not\n configurable).\n\n - The client's keep-alive timeout expires, causing the\n client to close the connection.\n\nSetting Deferred can help prevent errors in clients or load balancers that do not properly handle connection resets. 
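The two policies above compose independently; a small sketch showing both documented enum values in use, again as plain maps keyed by the documented JSON field names:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	spec := map[string]any{
		// Close empty connections silently rather than answering 400/408;
		// note the documented trade-off for diagnosing network errors.
		"httpEmptyRequestsPolicy": "Ignore",
		// Keep idle frontend connections open across router reloads.
		"idleConnectionTerminationPolicy": "Deferred",
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}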
Additionally, this option allows you to retain the pre-2.4 HAProxy behaviour: in HAProxy version 2.2 (OpenShift versions < 4.14), maintaining idle connections during a soft reload was the default behaviour, but starting with HAProxy 2.4, the default changed to closing idle connections immediately.\n\nImportant Consideration:\n\n - Using Deferred will result in temporary inconsistencies\n for the first request on each persistent connection\n after a route update and router reload. This request\n will be processed by the old HAProxy process using its\n old configuration. Subsequent requests will use the\n updated configuration.\n\nOperational Considerations:\n\n - Keeping idle connections open during reloads may lead\n to an accumulation of old HAProxy processes if\n connections remain idle for extended periods,\n especially in environments where frequent reloads\n occur.\n\n - Consider monitoring the number of HAProxy processes in\n the router pods when Deferred is set.\n\n - You may need to enable or adjust the\n `ingress.operator.openshift.io/hard-stop-after`\n duration (configured via an annotation on the\n IngressController resource) in environments with\n frequent reloads to prevent resource exhaustion.", +} + +func (IngressControllerSpec) SwaggerDoc() map[string]string { + return map_IngressControllerSpec +} + +var map_IngressControllerStatus = map[string]string{ + "": "IngressControllerStatus defines the observed status of the IngressController.", + "availableReplicas": "availableReplicas is number of observed available replicas according to the ingress controller deployment.", + "selector": "selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. The number of matching pods should equal the value of availableReplicas.", + "domain": "domain is the actual domain in use.", + "endpointPublishingStrategy": "endpointPublishingStrategy is the actual strategy in use.", + "conditions": "conditions is a list of conditions and their status.\n\nAvailable means the ingress controller deployment is available and servicing route and ingress resources (i.e, .status.availableReplicas equals .spec.replicas)\n\nThere are additional conditions which indicate the status of other ingress controller features and capabilities.\n\n * LoadBalancerManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy requires a service load balancer.\n - False if any of those conditions are unsatisfied.\n\n * LoadBalancerReady\n - True if the following conditions are met:\n * A load balancer is managed.\n * The load balancer is ready.\n - False if any of those conditions are unsatisfied.\n\n * DNSManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy and platform support DNS.\n * The ingress controller domain is set.\n * dns.config.openshift.io/cluster configures DNS zones.\n - False if any of those conditions are unsatisfied.\n\n * DNSReady\n - True if the following conditions are met:\n * DNS is managed.\n * DNS records have been successfully created.\n - False if any of those conditions are unsatisfied.", + "tlsProfile": "tlsProfile is the TLS connection configuration that is in effect.", + "observedGeneration": "observedGeneration is the most recent generation observed.", + "namespaceSelector": "namespaceSelector is the actual namespaceSelector in use.", + "routeSelector": "routeSelector is the actual routeSelector in use.", +} + +func (IngressControllerStatus) SwaggerDoc() map[string]string 
{ + return map_IngressControllerStatus +} + +var map_IngressControllerTuningOptions = map[string]string{ + "": "IngressControllerTuningOptions specifies options for tuning the performance of ingress controller pods", + "headerBufferBytes": "headerBufferBytes describes how much memory should be reserved (in bytes) for IngressController connection sessions. Note that this value must be at least 16384 if HTTP/2 is enabled for the IngressController (https://tools.ietf.org/html/rfc7540). If this field is empty, the IngressController will use a default value of 32768 bytes.\n\nSetting this field is generally not recommended as headerBufferBytes values that are too small may break the IngressController and headerBufferBytes values that are too large could cause the IngressController to use significantly more memory than necessary.", + "headerBufferMaxRewriteBytes": "headerBufferMaxRewriteBytes describes how much memory should be reserved (in bytes) from headerBufferBytes for HTTP header rewriting and appending for IngressController connection sessions. Note that incoming HTTP requests will be limited to (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning headerBufferBytes must be greater than headerBufferMaxRewriteBytes. If this field is empty, the IngressController will use a default value of 8192 bytes.\n\nSetting this field is generally not recommended as headerBufferMaxRewriteBytes values that are too small may break the IngressController and headerBufferMaxRewriteBytes values that are too large could cause the IngressController to use significantly more memory than necessary.", + "threadCount": "threadCount defines the number of threads created per HAProxy process. Creating more threads allows each ingress controller pod to handle more connections, at the cost of more system resources being used. HAProxy currently supports up to 64 threads. If this field is empty, the IngressController will use the default value. The current default is 4 threads, but this may change in future releases.\n\nSetting this field is generally not recommended. Increasing the number of HAProxy threads allows ingress controller pods to utilize more CPU time under load, potentially starving other pods if set too high. Reducing the number of threads may cause the ingress controller to perform poorly.", + "clientTimeout": "clientTimeout defines how long a connection will be held open while waiting for a client response.\n\nIf unset, the default timeout is 30s", + "clientFinTimeout": "clientFinTimeout defines how long a connection will be held open while waiting for the client response to the server/backend closing the connection.\n\nIf unset, the default timeout is 1s", + "serverTimeout": "serverTimeout defines how long a connection will be held open while waiting for a server/backend response.\n\nIf unset, the default timeout is 30s", + "serverFinTimeout": "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to the client closing the connection.\n\nIf unset, the default timeout is 1s", + "tunnelTimeout": "tunnelTimeout defines how long a tunnel connection (including websockets) will be held open while the tunnel is idle.\n\nIf unset, the default timeout is 1h", + "connectTimeout": "connectTimeout defines the maximum time to wait for a connection attempt to a server/backend to succeed.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". 
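The duration grammar described here is the same one Go's time.ParseDuration accepts, which makes it easy to validate values before applying them; a sketch (the timeout values themselves are hypothetical):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// spec.tuningOptions fragment with duration-string fields.
	tuningOptions := map[string]any{
		"clientTimeout":  "45s",
		"serverTimeout":  "45s",
		"tunnelTimeout":  "30m",
		"connectTimeout": "5s",
	}
	// The documented unit suffixes (ns, us, ms, s, m, h) match Go's parser.
	for _, v := range []string{"300ms", "1.5h", "2h45m"} {
		d, err := time.ParseDuration(v)
		fmt.Println(v, "->", d, err)
	}
	out, _ := json.MarshalIndent(tuningOptions, "", "  ")
	fmt.Println(string(out))
}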
Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. This default is subject to change over time. The current default is 5s.", + "tlsInspectDelay": "tlsInspectDelay defines how long the router can hold data to find a matching route.\n\nSetting this too short can cause the router to fall back to the default certificate for edge-terminated or reencrypt routes even when a better matching certificate could be used.\n\nIf unset, the default inspect delay is 5s", + "healthCheckInterval": "healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation \"router.openshift.io/haproxy.health.check.interval\".\n\nExpects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nSetting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Alternatively, setting this too high can result in increased latency, due to backend servers that are no longer available, but haven't yet been detected as such.\n\nAn empty or zero healthCheckInterval means no opinion and the IngressController chooses a default, which is subject to change over time. Currently the default healthCheckInterval value is 5s.\n\nCurrently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). Both are subject to change over time.", + "maxConnections": "maxConnections defines the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections but at the cost of additional system resources being consumed.\n\nPermitted values are: empty, 0, -1, and the range 2000-2000000.\n\nIf this field is empty or 0, the IngressController will use the default value of 50000, but the default is subject to change in future releases.\n\nIf the value is -1 then HAProxy will dynamically compute a maximum value based on the available ulimits in the running container. Selecting -1 (i.e., auto) will result in a large value being computed (~520000 on OpenShift >=4.10 clusters) and therefore each HAProxy process will incur significant memory usage compared to the current default of 50000.\n\nSetting a value that is greater than the current operating system limit will prevent the HAProxy process from starting.\n\nIf you choose a discrete value (e.g., 750000) and the router pod is migrated to a new node, there's no guarantee that the new node has identical ulimits configured. In such a scenario, the pod would fail to start.
If you have nodes with different ulimits configured (e.g., different tuned profiles) and you choose a discrete value then the guidance is to use -1 and let the value be computed dynamically at runtime.\n\nYou can monitor memory usage for router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}'.\n\nYou can monitor memory usage of individual HAProxy processes in router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}/container_processes{container=\"router\",namespace=\"openshift-ingress\"}'.", + "reloadInterval": "reloadInterval defines the minimum interval at which the router is allowed to reload to accept new changes. Increasing this value can prevent the accumulation of HAProxy processes, depending on the scenario. Increasing this interval can also lessen load imbalance on a backend's servers when using the roundrobin balancing algorithm. Alternatively, decreasing this value may decrease latency since updates to HAProxy's configuration can take effect more quickly.\n\nThe value must be a time duration value; see . Currently, the minimum value allowed is 1s, and the maximum allowed value is 120s. Minimum and maximum allowed values may change in future versions of OpenShift. Note that if a duration outside of these bounds is provided, the value of reloadInterval will be capped/floored and not rejected (e.g. a duration of over 120s will be capped to 120s; the IngressController will not reject and replace this disallowed value with the default).\n\nA zero value for reloadInterval tells the IngressController to choose the default, which is currently 5s and subject to change without notice.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nNote: Setting a value significantly larger than the default of 5s can cause latency in observing updates to routes and their endpoints. HAProxy's configuration will be reloaded less frequently, and newly created routes will not be served until the subsequent reload.", +} + +func (IngressControllerTuningOptions) SwaggerDoc() map[string]string { + return map_IngressControllerTuningOptions +} + +var map_LoadBalancerStrategy = map[string]string{ + "": "LoadBalancerStrategy holds parameters for a load balancer.", + "scope": "scope indicates the scope at which the load balancer is exposed. Possible values are \"External\" and \"Internal\".", + "allowedSourceRanges": "allowedSourceRanges specifies an allowlist of IP address ranges to which access to the load balancer should be restricted. Each range must be specified using CIDR notation (e.g. \"10.0.0.0/8\" or \"fd00::/8\"). 
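A sketch of the LoadBalancerStrategy fields documented here, reusing the CIDR examples given above; this assumes the strategy sits under the endpointPublishingStrategy.loadBalancer key, and uses plain maps rather than the generated structs:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// spec.endpointPublishingStrategy.loadBalancer fragment.
	loadBalancer := map[string]any{
		"scope": "External",
		// Restrict client access to the documented example ranges.
		"allowedSourceRanges": []any{"10.0.0.0/8", "fd00::/8"},
		// Let the ingress operator manage the wildcard DNS record.
		"dnsManagementPolicy": "Managed",
	}
	out, _ := json.MarshalIndent(loadBalancer, "", "  ")
	fmt.Println(string(out))
}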
If no range is specified, \"0.0.0.0/0\" for IPv4 and \"::/0\" for IPv6 are used by default, which allows all source addresses.\n\nTo facilitate migration from earlier versions of OpenShift that did not have the allowedSourceRanges field, you may set the service.beta.kubernetes.io/load-balancer-source-ranges annotation on the \"router-\" service in the \"openshift-ingress\" namespace, and this annotation will take effect if allowedSourceRanges is empty on OpenShift 4.12.", + "providerParameters": "providerParameters holds desired load balancer information specific to the underlying infrastructure provider.\n\nIf empty, defaults will be applied. See specific providerParameters fields for details about their defaults.", + "dnsManagementPolicy": "dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record associated with the load balancer service will be managed by the ingress operator. It defaults to Managed. Valid values are: Managed and Unmanaged.", +} + +func (LoadBalancerStrategy) SwaggerDoc() map[string]string { + return map_LoadBalancerStrategy +} + +var map_LoggingDestination = map[string]string{ + "": "LoggingDestination describes a destination for log messages.", + "type": "type is the type of destination for logs. It must be one of the following:\n\n* Container\n\nThe ingress operator configures the sidecar container named \"logs\" on the ingress controller pod and configures the ingress controller to write logs to the sidecar. The logs are then available as container logs. The expectation is that the administrator configures a custom logging solution that reads logs from this sidecar. Note that using container logs means that logs may be dropped if the rate of logs exceeds the container runtime's or the custom logging solution's capacity.\n\n* Syslog\n\nLogs are sent to a syslog endpoint. The administrator must specify an endpoint that can receive syslog messages. The expectation is that the administrator has configured a custom syslog instance.", + "syslog": "syslog holds parameters for a syslog endpoint. Present only if type is Syslog.", + "container": "container holds parameters for the Container logging destination. Present only if type is Container.", +} + +func (LoggingDestination) SwaggerDoc() map[string]string { + return map_LoggingDestination +} + +var map_NodePlacement = map[string]string{ + "": "NodePlacement describes node scheduling configuration for an ingress controller.", + "nodeSelector": "nodeSelector is the node selector applied to ingress controller deployments.\n\nIf set, the specified selector is used and replaces the default.\n\nIf unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nWhen defaultPlacement is Workers, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/worker: ''\n\nWhen defaultPlacement is ControlPlane, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/master: ''\n\nThese defaults are subject to change.\n\nNote that using nodeSelector.matchExpressions is not supported. Only nodeSelector.matchLabels may be used. 
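A matchLabels-only sketch of the NodePlacement fields above, using the documented worker defaults plus a hypothetical toleration (the toleration shape is the standard Kubernetes one, not defined in this excerpt):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// spec.nodePlacement fragment; only matchLabels is supported here.
	nodePlacement := map[string]any{
		"nodeSelector": map[string]any{
			"matchLabels": map[string]any{
				"kubernetes.io/os":               "linux",
				"node-role.kubernetes.io/worker": "",
			},
		},
		"tolerations": []any{
			// Hypothetical toleration for dedicated ingress nodes.
			map[string]any{"key": "infra", "operator": "Exists", "effect": "NoSchedule"},
		},
	}
	out, _ := json.MarshalIndent(nodePlacement, "", "  ")
	fmt.Println(string(out))
}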
This is a limitation of the Kubernetes API: the pod spec does not allow complex expressions for node selectors.", + "tolerations": "tolerations is a list of tolerations applied to ingress controller deployments.\n\nThe default is an empty list.\n\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", +} + +func (NodePlacement) SwaggerDoc() map[string]string { + return map_NodePlacement +} + +var map_NodePortStrategy = map[string]string{ + "": "NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy.", + "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", +} + +func (NodePortStrategy) SwaggerDoc() map[string]string { + return map_NodePortStrategy +} + +var map_OpenStackLoadBalancerParameters = map[string]string{ + "": "OpenStackLoadBalancerParameters provides configuration settings that are specific to OpenStack load balancers.", + "floatingIP": "floatingIP specifies the IP address that the load balancer will use. When not specified, an IP address will be assigned randomly by the OpenStack cloud provider. When specified, the floating IP has to be pre-created. If the specified value is not a floating IP or is already claimed, the OpenStack cloud provider won't be able to provision the load balancer. This field may only be used if the IngressController has External scope. This value must be a valid IPv4 or IPv6 address. ", +} + +func (OpenStackLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_OpenStackLoadBalancerParameters +} + +var map_PrivateStrategy = map[string]string{ + "": "PrivateStrategy holds parameters for the Private endpoint publishing strategy.", + "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". 
* \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", +} + +func (PrivateStrategy) SwaggerDoc() map[string]string { + return map_PrivateStrategy +} + +var map_ProviderLoadBalancerParameters = map[string]string{ + "": "ProviderLoadBalancerParameters holds desired load balancer information specific to the underlying infrastructure provider.", + "type": "type is the underlying infrastructure provider for the load balancer. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"IBM\", \"Nutanix\", \"OpenStack\", and \"VSphere\".", + "aws": "aws provides configuration settings that are specific to AWS load balancers.\n\nIf empty, defaults will be applied. See specific aws fields for details about their defaults.", + "gcp": "gcp provides configuration settings that are specific to GCP load balancers.\n\nIf empty, defaults will be applied. See specific gcp fields for details about their defaults.", + "ibm": "ibm provides configuration settings that are specific to IBM Cloud load balancers.\n\nIf empty, defaults will be applied. See specific ibm fields for details about their defaults.", + "openstack": "openstack provides configuration settings that are specific to OpenStack load balancers.\n\nIf empty, defaults will be applied. See specific openstack fields for details about their defaults.", +} + +func (ProviderLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_ProviderLoadBalancerParameters +} + +var map_RouteAdmissionPolicy = map[string]string{ + "": "RouteAdmissionPolicy is an admission policy for allowing new route claims.", + "namespaceOwnership": "namespaceOwnership describes how host name claims across namespaces should be handled.\n\nValue must be one of:\n\n- Strict: Do not allow routes in different namespaces to claim the same host.\n\n- InterNamespaceAllowed: Allow routes to claim different paths of the same\n host name across namespaces.\n\nIf empty, the default is Strict.", + "wildcardPolicy": "wildcardPolicy describes how routes with wildcard policies should be handled for the ingress controller. WildcardPolicy controls use of routes [1] exposed by the ingress controller based on the route's wildcard policy.\n\n[1] https://github.com/openshift/api/blob/master/route/v1/types.go\n\nNote: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed will cause admitted routes with a wildcard policy of Subdomain to stop working. 
These routes must be updated to a wildcard policy of None to be readmitted by the ingress controller.\n\nWildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values.\n\nIf empty, defaults to \"WildcardsDisallowed\".", +} + +func (RouteAdmissionPolicy) SwaggerDoc() map[string]string { + return map_RouteAdmissionPolicy +} + +var map_SyslogLoggingDestinationParameters = map[string]string{ + "": "SyslogLoggingDestinationParameters describes parameters for the Syslog logging destination type.", + "address": "address is the IP address of the syslog endpoint that receives log messages.", + "port": "port is the UDP port number of the syslog endpoint that receives log messages.", + "facility": "facility specifies the syslog facility of log messages.\n\nIf this field is empty, the facility is \"local1\".", + "maxLength": "maxLength is the maximum length of the log message.\n\nValid values are integers in the range 480 to 4096, inclusive.\n\nWhen omitted, the default value is 1024.", +} + +func (SyslogLoggingDestinationParameters) SwaggerDoc() map[string]string { + return map_SyslogLoggingDestinationParameters +} + +var map_GatherStatus = map[string]string{ + "": "gatherStatus provides information about the last known gather event.", + "lastGatherTime": "lastGatherTime is the last time when Insights data gathering finished. An empty value means that no data has been gathered yet.", + "lastGatherDuration": "lastGatherDuration is the total time taken to process all gatherers during the last gather event.", + "gatherers": "gatherers is a list of active gatherers (and their statuses) in the last gathering.", +} + +func (GatherStatus) SwaggerDoc() map[string]string { + return map_GatherStatus +} + +var map_GathererStatus = map[string]string{ + "": "gathererStatus represents information about a particular data gatherer.", + "conditions": "conditions provide details on the status of each gatherer.", + "name": "name is the name of the gatherer.", + "lastGatherDuration": "lastGatherDuration represents the time spent gathering.", +} + +func (GathererStatus) SwaggerDoc() map[string]string { + return map_GathererStatus +} + +var map_HealthCheck = map[string]string{ + "": "healthCheck represents the attributes of an Insights health check.", + "description": "description provides a basic description of the health check.", + "totalRisk": "totalRisk of the health check. Indicator of the total risk posed by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, and the higher the number, the more important the issue.", + "advisorURI": "advisorURI provides the URL link to the Insights Advisor.", + "state": "state determines what the current state of the health check is. The health check is enabled by default and can be disabled by the user in the Insights advisor user interface.", +} + +func (HealthCheck) SwaggerDoc() map[string]string { + return map_HealthCheck +} + +var map_InsightsOperator = map[string]string{ + "": "\n\nInsightsOperator holds cluster-wide information about the Insights Operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Insights Operator.", + "status": "status is the most recently observed status of the Insights operator.", +} + +func (InsightsOperator) SwaggerDoc() map[string]string { + return map_InsightsOperator +} + +var map_InsightsOperatorList = map[string]string{ + "": "InsightsOperatorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (InsightsOperatorList) SwaggerDoc() map[string]string { + return map_InsightsOperatorList +} + +var map_InsightsOperatorStatus = map[string]string{ + "gatherStatus": "gatherStatus provides basic information about the last Insights data gathering. When omitted, this means no data gathering has taken place yet.", + "insightsReport": "insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet.", +} + +func (InsightsOperatorStatus) SwaggerDoc() map[string]string { + return map_InsightsOperatorStatus +} + +var map_InsightsReport = map[string]string{ + "": "insightsReport provides an Insights health check report based on the most recently sent Insights data.", + "downloadedAt": "downloadedAt is the time when the last Insights report was downloaded. An empty value means that there has not been any Insights report downloaded yet and it usually appears in disconnected clusters (or clusters where Insights data gathering is disabled).", + "healthChecks": "healthChecks provides basic information about active Insights health checks in a cluster.", +} + +func (InsightsReport) SwaggerDoc() map[string]string { + return map_InsightsReport +} + +var map_KubeAPIServer = map[string]string{ + "": "KubeAPIServer provides information to configure an operator to manage kube-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Kubernetes API Server", + "status": "status is the most recently observed status of the Kubernetes API Server", +} + +func (KubeAPIServer) SwaggerDoc() map[string]string { + return map_KubeAPIServer +} + +var map_KubeAPIServerList = map[string]string{ + "": "KubeAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (KubeAPIServerList) SwaggerDoc() map[string]string { + return map_KubeAPIServerList +} + +var map_KubeAPIServerStatus = map[string]string{ + "serviceAccountIssuers": "serviceAccountIssuers tracks the history of used service account issuers. The item without an expiration time represents the currently used service account issuer. The other items represent service account issuers that were used previously and are still being trusted.
The default expiration for the items is set by the platform and it defaults to 24h. see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection", +} + +func (KubeAPIServerStatus) SwaggerDoc() map[string]string { + return map_KubeAPIServerStatus +} + +var map_ServiceAccountIssuerStatus = map[string]string{ + "name": "name is the name of the service account issuer", + "expirationTime": "expirationTime is the time after which this service account issuer will be pruned and removed from the trusted list of service account issuers.", +} + +func (ServiceAccountIssuerStatus) SwaggerDoc() map[string]string { + return map_ServiceAccountIssuerStatus +} + +var map_KubeControllerManager = map[string]string{ + "": "KubeControllerManager provides information to configure an operator to manage kube-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Kubernetes Controller Manager", + "status": "status is the most recently observed status of the Kubernetes Controller Manager", +} + +func (KubeControllerManager) SwaggerDoc() map[string]string { + return map_KubeControllerManager +} + +var map_KubeControllerManagerList = map[string]string{ + "": "KubeControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (KubeControllerManagerList) SwaggerDoc() map[string]string { + return map_KubeControllerManagerList +} + +var map_KubeControllerManagerSpec = map[string]string{ + "useMoreSecureServiceCA": "useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only enough certificates to validate service serving certificates. Once set to true, it cannot be set to false. Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will only have the more secure content.", +} + +func (KubeControllerManagerSpec) SwaggerDoc() map[string]string { + return map_KubeControllerManagerSpec +} + +var map_KubeStorageVersionMigrator = map[string]string{ + "": "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (KubeStorageVersionMigrator) SwaggerDoc() map[string]string { + return map_KubeStorageVersionMigrator +} + +var map_KubeStorageVersionMigratorList = map[string]string{ + "": "KubeStorageVersionMigratorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string { + return map_KubeStorageVersionMigratorList +} + +var map_MachineConfiguration = map[string]string{ + "": "MachineConfiguration provides information to configure an operator to manage Machine Configuration.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Machine Config Operator", + "status": "status is the most recently observed status of the Machine Config Operator", +} + +func (MachineConfiguration) SwaggerDoc() map[string]string { + return map_MachineConfiguration +} + +var map_MachineConfigurationList = map[string]string{ + "": "MachineConfigurationList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (MachineConfigurationList) SwaggerDoc() map[string]string { + return map_MachineConfigurationList +} + +var map_MachineConfigurationSpec = map[string]string{ + "managedBootImages": "managedBootImages allows configuration for the management of boot images for machine resources within the cluster. This configuration allows users to select resources that should be updated to the latest boot images during cluster upgrades, ensuring that new machines always boot with the current cluster version's boot image. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The default for each machine manager mode is All for GCP and AWS platforms, and None for all other platforms.", + "nodeDisruptionPolicy": "nodeDisruptionPolicy allows an admin to set granular node disruption actions for MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow for less downtime when doing small configuration updates to the cluster. 
This configuration has no effect on cluster upgrades, which will still incur node disruption where required.", +} + +func (MachineConfigurationSpec) SwaggerDoc() map[string]string { + return map_MachineConfigurationSpec +} + +var map_MachineConfigurationStatus = map[string]string{ + "observedGeneration": "observedGeneration is the last generation change you've dealt with", + "conditions": "conditions is a list of conditions and their status", + "nodeDisruptionPolicyStatus": "nodeDisruptionPolicyStatus reflects what the latest cluster-validated policies are, and will be used by the Machine Config Daemon during future node updates.", + "managedBootImagesStatus": "managedBootImagesStatus reflects what the latest cluster-validated boot image configuration is and will be used by the Machine Config Controller while performing boot image updates.", +} + +func (MachineConfigurationStatus) SwaggerDoc() map[string]string { + return map_MachineConfigurationStatus +} + +var map_MachineManager = map[string]string{ + "": "MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information such as the resource type and the API Group of the resource. It also provides granular control via the selection field.", + "resource": "resource is the machine management resource's type. The only current valid value is machinesets. machinesets means that the machine manager will only register resources of the kind MachineSet.", + "apiGroup": "apiGroup is the name of the APIGroup that the machine management resource belongs to. The only current valid value is machine.openshift.io. machine.openshift.io means that the machine manager will only register resources that belong to the OpenShift machine API group.", + "selection": "selection allows granular control of the machine management resources that will be registered for boot image updates.", +} + +func (MachineManager) SwaggerDoc() map[string]string { + return map_MachineManager +} + +var map_MachineManagerSelector = map[string]string{ + "mode": "mode determines how machine managers will be selected for updates. Valid values are All and Partial. All means that every resource matched by the machine manager will be updated. Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. None means that every resource matched by the machine manager will not be updated.", + "partial": "partial provides label selector(s) that can be used to match machine management resources. Only permitted when mode is set to \"Partial\".", +} + +func (MachineManagerSelector) SwaggerDoc() map[string]string { + return map_MachineManagerSelector +} + +var map_ManagedBootImages = map[string]string{ + "machineManagers": "machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator will watch for changes to this list.
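Pulling the MachineManager, MachineManagerSelector, and PartialSelector fields together, a sketch of a managedBootImages entry follows; the label selector is hypothetical, the nested key layout is assumed from the field descriptions, and machineResourceSelector is documented under PartialSelector further below:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// spec.managedBootImages fragment: opt MachineSets into boot image
	// updates, but only those matching a label selector.
	managedBootImages := map[string]any{
		"machineManagers": []any{
			map[string]any{
				"resource": "machinesets",
				"apiGroup": "machine.openshift.io",
				"selection": map[string]any{
					"mode": "Partial",
					"partial": map[string]any{
						"machineResourceSelector": map[string]any{
							"matchLabels": map[string]any{"update-boot-images": "true"}, // hypothetical label
						},
					},
				},
			},
		},
	}
	out, _ := json.MarshalIndent(managedBootImages, "", "  ")
	fmt.Println(string(out))
}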
Only one entry is permitted per type of machine management resource.", +} + +func (ManagedBootImages) SwaggerDoc() map[string]string { + return map_ManagedBootImages +} + +var map_NodeDisruptionPolicyClusterStatus = map[string]string{ + "": "NodeDisruptionPolicyClusterStatus is the type for the status object, rendered by the controller as a merge of cluster defaults and user provided policies", + "files": "files is a list of MachineConfig file definitions and actions to take on changes to those paths", + "units": "units is a list of MachineConfig unit definitions and actions to take on changes to those services", + "sshkey": "sshkey is the overall sshkey MachineConfig definition", +} + +func (NodeDisruptionPolicyClusterStatus) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyClusterStatus +} + +var map_NodeDisruptionPolicyConfig = map[string]string{ + "": "NodeDisruptionPolicyConfig is the overall spec definition for files/units/sshkeys", + "files": "files is a list of MachineConfig file definitions and actions to take on changes to those paths. This list supports a maximum of 50 entries.", + "units": "units is a list of MachineConfig unit definitions and actions to take on changes to those services. This list supports a maximum of 50 entries.", + "sshkey": "sshkey maps to the ignition.sshkeys field in the MachineConfig object; defining an action for this will apply to all sshkey changes in the cluster", +} + +func (NodeDisruptionPolicyConfig) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyConfig +} + +var map_NodeDisruptionPolicySpecAction = map[string]string{ + "type": "type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed. Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. reload/restart requires a corresponding service target specified in the reload/restart field. Other values require no further configuration.", + "reload": "reload specifies the service to reload, only valid if type is reload", + "restart": "restart specifies the service to restart, only valid if type is restart", +} + +func (NodeDisruptionPolicySpecAction) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicySpecAction +} + +var map_NodeDisruptionPolicySpecFile = map[string]string{ + "": "NodeDisruptionPolicySpecFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object", + "path": "path is the location of a file being managed through a MachineConfig. The Actions in the policy will apply to changes to the file at this path.", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions.
This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicySpecFile) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicySpecFile +} + +var map_NodeDisruptionPolicySpecSSHKey = map[string]string{ + "": "NodeDisruptionPolicySpecSSHKey is actions to take for any SSHKey change and is used in the NodeDisruptionPolicyConfig object", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicySpecSSHKey) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicySpecSSHKey +} + +var map_NodeDisruptionPolicySpecUnit = map[string]string{ + "": "NodeDisruptionPolicySpecUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object", + "name": "name represents the service name of a systemd service managed through a MachineConfig. Actions specified will be applied for changes to the named service. Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long. ${NAME} must be at least 1 character long and can only consist of letters, digits, \":\", \"-\", \"_\", \".\", and \"\\\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicySpecUnit) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicySpecUnit +} + +var map_NodeDisruptionPolicyStatus = map[string]string{ + "clusterPolicies": "clusterPolicies is a merge of cluster default and user provided node disruption policies.", +} + +func (NodeDisruptionPolicyStatus) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyStatus +} + +var map_NodeDisruptionPolicyStatusAction = map[string]string{ + "type": "type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed. Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. reload/restart requires a corresponding service target specified in the reload/restart field.
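A sketch of a NodeDisruptionPolicyConfig tying the file, unit, and sshkey entries above to concrete actions; the file path is hypothetical, and crio.service is the serviceName example documented further below:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// spec.nodeDisruptionPolicy fragment: restart CRI-O when a managed file
	// changes, and take no action on SSH key changes.
	cfg := map[string]any{
		"files": []any{
			map[string]any{
				"path": "/etc/containers/registries.conf", // hypothetical path
				"actions": []any{
					map[string]any{
						"type":    "Restart",
						"restart": map[string]any{"serviceName": "crio.service"},
					},
				},
			},
		},
		"sshkey": map[string]any{
			"actions": []any{map[string]any{"type": "None"}},
		},
	}
	out, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
}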
Other values require no further configuration", + "reload": "reload specifies the service to reload, only valid if type is reload", + "restart": "restart specifies the service to restart, only valid if type is restart", +} + +func (NodeDisruptionPolicyStatusAction) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyStatusAction +} + +var map_NodeDisruptionPolicyStatusFile = map[string]string{ + "": "NodeDisruptionPolicyStatusFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object", + "path": "path is the location of a file being managed through a MachineConfig. The Actions in the policy will apply to changes to the file at this path.", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supercede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicyStatusFile) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyStatusFile +} + +var map_NodeDisruptionPolicyStatusSSHKey = map[string]string{ + "": "NodeDisruptionPolicyStatusSSHKey is actions to take for any SSHKey change and is used in the NodeDisruptionPolicyClusterStatus object", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supercede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicyStatusSSHKey) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyStatusSSHKey +} + +var map_NodeDisruptionPolicyStatusUnit = map[string]string{ + "": "NodeDisruptionPolicyStatusUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object", + "name": "name represents the service name of a systemd service managed through a MachineConfig Actions specified will be applied for changes to the named service. Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supercede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. 
The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicyStatusUnit) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyStatusUnit +} + +var map_PartialSelector = map[string]string{ + "": "PartialSelector provides label selector(s) that can be used to match machine management resources.", + "machineResourceSelector": "machineResourceSelector is a label selector that can be used to select machine resources like MachineSets.", +} + +func (PartialSelector) SwaggerDoc() map[string]string { + return map_PartialSelector +} + +var map_ReloadService = map[string]string{ + "": "ReloadService allows the user to specify the services to be reloaded", + "serviceName": "serviceName is the full name (e.g. crio.service) of the service to be reloaded Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", +} + +func (ReloadService) SwaggerDoc() map[string]string { + return map_ReloadService +} + +var map_RestartService = map[string]string{ + "": "RestartService allows the user to specify the services to be restarted", + "serviceName": "serviceName is the full name (e.g. crio.service) of the service to be restarted Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", +} + +func (RestartService) SwaggerDoc() map[string]string { + return map_RestartService +} + +var map_AdditionalNetworkDefinition = map[string]string{ + "": "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.", + "type": "type is the type of network The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan", + "name": "name is the name of the network. This will be populated in the resulting CRD This must be unique.", + "namespace": "namespace is the namespace of the network. This will be populated in the resulting CRD If not given the network will be created in the default namespace.", + "rawCNIConfig": "rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD", + "simpleMacvlanConfig": "simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan", +} + +func (AdditionalNetworkDefinition) SwaggerDoc() map[string]string { + return map_AdditionalNetworkDefinition +} + +var map_AdditionalRoutingCapabilities = map[string]string{ + "": "AdditionalRoutingCapabilities describes components and relevant configuration providing advanced routing capabilities.", + "providers": "providers is a set of enabled components that provide additional routing capabilities. Entries on this list must be unique. 
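Taken together, the types above form the node disruption policy surface of the MachineConfiguration operator spec. A minimal sketch, assuming the vendored github.com/openshift/api/operator/v1 package: field names follow the swagger docs above, the action types are written as the documented string values (untyped string constants assign to the named types, so nothing about exported constant names is assumed), and the drop-in file path and example.service unit are hypothetical.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Reload crio.service when a hypothetical drop-in file changes,
	// restart a hypothetical unit when that unit changes, and take no
	// action on SSH key changes.
	cfg := operatorv1.NodeDisruptionPolicyConfig{
		Files: []operatorv1.NodeDisruptionPolicySpecFile{{
			Path: "/etc/crio/crio.conf.d/10-example.conf", // hypothetical path
			Actions: []operatorv1.NodeDisruptionPolicySpecAction{{
				Type:   "Reload", // documented action value
				Reload: &operatorv1.ReloadService{ServiceName: "crio.service"},
			}},
		}},
		Units: []operatorv1.NodeDisruptionPolicySpecUnit{{
			Name: "example.service", // hypothetical ${NAME}${SERVICETYPE}
			Actions: []operatorv1.NodeDisruptionPolicySpecAction{{
				Type:    "Restart", // documented action value
				Restart: &operatorv1.RestartService{ServiceName: "example.service"},
			}},
		}},
		SSHKey: operatorv1.NodeDisruptionPolicySpecSSHKey{
			Actions: []operatorv1.NodeDisruptionPolicySpecAction{{
				Type: "None", // per the docs, None cannot be combined with other actions
			}},
		},
	}
	fmt.Printf("%+v\n", cfg)
}

Per the docs above, Reload and Restart each require the corresponding reload/restart field to be set, while Reboot and None must stand alone in an actions list.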
+var map_AdditionalNetworkDefinition = map[string]string{
+	"": "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.",
+	"type": "type is the type of network. The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan",
+	"name": "name is the name of the network. This will be populated in the resulting CRD. This must be unique.",
+	"namespace": "namespace is the namespace of the network. This will be populated in the resulting CRD. If not given, the network will be created in the default namespace.",
+	"rawCNIConfig": "rawCNIConfig is the raw CNI configuration JSON to create in the NetworkAttachmentDefinition CRD",
+	"simpleMacvlanConfig": "simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan",
+}
+
+func (AdditionalNetworkDefinition) SwaggerDoc() map[string]string {
+	return map_AdditionalNetworkDefinition
+}
+
+var map_AdditionalRoutingCapabilities = map[string]string{
+	"": "AdditionalRoutingCapabilities describes components and relevant configuration providing advanced routing capabilities.",
+	"providers": "providers is a set of enabled components that provide additional routing capabilities. Entries on this list must be unique. The only valid value is currently \"FRR\", which provides FRR routing capabilities through the deployment of FRR.",
+}
+
+func (AdditionalRoutingCapabilities) SwaggerDoc() map[string]string {
+	return map_AdditionalRoutingCapabilities
+}
+
+var map_ClusterNetworkEntry = map[string]string{
+	"": "ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks",
+}
+
+func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
+	return map_ClusterNetworkEntry
+}
+
+var map_DefaultNetworkDefinition = map[string]string{
+	"": "DefaultNetworkDefinition represents a single network plugin's configuration. type must be specified, along with exactly one \"Config\" that matches the type.",
+	"type": "type is the type of network. All NetworkTypes are supported except for NetworkTypeRaw",
+	"openshiftSDNConfig": "openshiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported.",
+	"ovnKubernetesConfig": "ovnKubernetesConfig configures the ovn-kubernetes plugin.",
+}
+
+func (DefaultNetworkDefinition) SwaggerDoc() map[string]string {
+	return map_DefaultNetworkDefinition
+}
+
+var map_EgressIPConfig = map[string]string{
+	"": "EgressIPConfig defines the configuration knobs for egressip",
+	"reachabilityTotalTimeoutSeconds": "reachabilityTotalTimeout configures the EgressIP node reachability check total timeout in seconds. If the EgressIP node cannot be reached within this timeout, the node is declared down. Setting a large value may cause the EgressIP feature to react slowly to node changes. In particular, it may react slowly for EgressIP nodes that have a genuine problem and are unreachable. When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 1 second. A value of 0 disables the EgressIP node's reachability check.",
+}
+
+func (EgressIPConfig) SwaggerDoc() map[string]string {
+	return map_EgressIPConfig
+}
+
+var map_ExportNetworkFlows = map[string]string{
+	"netFlow": "netFlow defines the NetFlow configuration.",
+	"sFlow": "sFlow defines the SFlow configuration.",
+	"ipfix": "ipfix defines IPFIX configuration.",
+}
+
+func (ExportNetworkFlows) SwaggerDoc() map[string]string {
+	return map_ExportNetworkFlows
+}
+
+var map_FeaturesMigration = map[string]string{
+	"egressIP": "egressIP specified whether or not the Egress IP configuration was migrated. DEPRECATED: network type migration is no longer supported.",
+	"egressFirewall": "egressFirewall specified whether or not the Egress Firewall configuration was migrated. DEPRECATED: network type migration is no longer supported.",
+	"multicast": "multicast specified whether or not the multicast configuration was migrated. DEPRECATED: network type migration is no longer supported.",
+}
+
+func (FeaturesMigration) SwaggerDoc() map[string]string {
+	return map_FeaturesMigration
+}
+
+var map_GatewayConfig = map[string]string{
+	"": "GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides",
+	"routingViaHost": "routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified.",
+	"ipForwarding": "ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to \"Global\". The supported values are \"Restricted\" and \"Global\".",
+	"ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default configuration is used. Check individual member fields within ipv4 for details of default values.",
+	"ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default configuration is used. Check individual member fields within ipv6 for details of default values.",
+}
+
+func (GatewayConfig) SwaggerDoc() map[string]string {
+	return map_GatewayConfig
+}
+
+var map_HybridOverlayConfig = map[string]string{
+	"hybridClusterNetwork": "hybridClusterNetwork defines a network space given to nodes on an additional overlay network.",
+	"hybridOverlayVXLANPort": "hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789.",
+}
+
+func (HybridOverlayConfig) SwaggerDoc() map[string]string {
+	return map_HybridOverlayConfig
+}
+
+var map_IPAMConfig = map[string]string{
+	"": "IPAMConfig contains configurations for IPAM (IP Address Management)",
+	"type": "type is the type of IPAM module that will be used for IP Address Management (IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic",
+	"staticIPAMConfig": "staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic",
+}
+
+func (IPAMConfig) SwaggerDoc() map[string]string {
+	return map_IPAMConfig
+}
+
+var map_IPFIXConfig = map[string]string{
+	"collectors": "ipfixCollectors is a list of strings formatted as ip:port with a maximum of ten items",
+}
+
+func (IPFIXConfig) SwaggerDoc() map[string]string {
+	return map_IPFIXConfig
+}
+
+var map_IPsecConfig = map[string]string{
+	"mode": "mode defines the behaviour of the ipsec configuration within the platform. Valid values are `Disabled`, `External` and `Full`. When 'Disabled', ipsec will not be enabled at the node level. When 'External', ipsec is enabled on the node level but requires the user to configure the secure communication parameters. This mode is for external secure communications and the configuration can be done using the k8s-nmstate operator. When 'Full', ipsec is configured on the node level and inter-pod secure communication within the cluster is configured. Note with `Full`, if ipsec is desired for communication with external (to the cluster) entities (such as storage arrays), this is left to the user to configure.",
+	"full": "full defines configuration parameters for the IPsec `Full` mode. This is permitted only when mode is configured with `Full`, and forbidden otherwise.",
+}
+
+func (IPsecConfig) SwaggerDoc() map[string]string {
+	return map_IPsecConfig
+}
+
+var map_IPsecFullModeConfig = map[string]string{
+	"": "IPsecFullModeConfig defines configuration parameters for the IPsec `Full` mode.",
+	"encapsulation": "encapsulation configures how libreswan encapsulates inter-pod traffic across nodes to handle NAT traversal. When configured, it uses UDP port 4500 for the encapsulation. Valid values are Always, Auto and omitted. Always means enable UDP encapsulation regardless of whether NAT is detected. Auto means enable UDP encapsulation based on the detection of NAT. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is Auto.",
+}
+
+func (IPsecFullModeConfig) SwaggerDoc() map[string]string {
+	return map_IPsecFullModeConfig
+}
+
+var map_IPv4GatewayConfig = map[string]string{
+	"": "IPV4GatewayConfig holds the configuration parameters for IPV4 connections in the GatewayConfig for OVN-Kubernetes",
+	"internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.0.0/17. The value must be in proper IPV4 CIDR format.",
+}
+
+func (IPv4GatewayConfig) SwaggerDoc() map[string]string {
+	return map_IPv4GatewayConfig
+}
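The IPsecConfig and IPsecFullModeConfig docs above pair a mode with a mode-specific config struct. A minimal sketch of enabling Full mode with Auto UDP encapsulation, again assuming the vendored github.com/openshift/api/operator/v1 types; the string values are the documented enum values, assigned as untyped constants rather than guessing exported constant names.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Full mode enables node-level IPsec plus inter-pod encryption; per
	// the docs above, the full field is only permitted when mode is Full.
	ipsec := operatorv1.IPsecConfig{
		Mode: "Full",
		Full: &operatorv1.IPsecFullModeConfig{
			// Auto turns on UDP (port 4500) encapsulation only when NAT
			// is detected; Always would force it unconditionally.
			Encapsulation: "Auto",
		},
	}
	fmt.Printf("%+v\n", ipsec)
}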
+var map_IPv4OVNKubernetesConfig = map[string]string{
+	"internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 100.88.0.0/16. The subnet must be large enough to accommodate one IP per node in your cluster. The value must be in proper IPV4 CIDR format.",
+	"internalJoinSubnet": "internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is already being used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The current default value is 100.64.0.0/16. The subnet must be large enough to accommodate one IP per node in your cluster. The value must be in proper IPV4 CIDR format.",
+}
+
+func (IPv4OVNKubernetesConfig) SwaggerDoc() map[string]string {
+	return map_IPv4OVNKubernetesConfig
+}
+
+var map_IPv6GatewayConfig = map[string]string{
+	"": "IPV6GatewayConfig holds the configuration parameters for IPV6 connections in the GatewayConfig for OVN-Kubernetes",
+	"internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/112. Note that IPV6 dual addresses are not permitted.",
+}
+
+func (IPv6GatewayConfig) SwaggerDoc() map[string]string {
+	return map_IPv6GatewayConfig
+}
+
+var map_IPv6OVNKubernetesConfig = map[string]string{
+	"internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v6 subnet in IPV6 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The subnet must be large enough to accommodate one IP per node in your cluster. The current default subnet is fd97::/64. The value must be in proper IPV6 CIDR format. Note that IPV6 dual addresses are not permitted.",
+	"internalJoinSubnet": "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is already being used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The subnet must be large enough to accommodate one IP per node in your cluster. The current default value is fd98::/64. The value must be in proper IPV6 CIDR format. Note that IPV6 dual addresses are not permitted.",
+}
+
+func (IPv6OVNKubernetesConfig) SwaggerDoc() map[string]string {
+	return map_IPv6OVNKubernetesConfig
+}
+
+var map_MTUMigration = map[string]string{
+	"": "MTUMigration contains information about MTU migration.",
+	"network": "network contains information about MTU migration for the default network. Migrations are only allowed to MTU values lower than the machine's uplink MTU by the minimum appropriate offset.",
+	"machine": "machine contains MTU migration configuration for the machine's uplink. Needs to be migrated along with the default network MTU unless the current uplink MTU already accommodates the default network MTU.",
+}
+
+func (MTUMigration) SwaggerDoc() map[string]string {
+	return map_MTUMigration
+}
+
+var map_MTUMigrationValues = map[string]string{
+	"": "MTUMigrationValues contains the values for an MTU migration.",
+	"to": "to is the MTU to migrate to.",
+	"from": "from is the MTU to migrate from.",
+}
+
+func (MTUMigrationValues) SwaggerDoc() map[string]string {
+	return map_MTUMigrationValues
+}
+
+var map_NetFlowConfig = map[string]string{
+	"collectors": "netFlow defines the NetFlow collectors that will consume the flow data exported from OVS. It is a list of strings formatted as ip:port with a maximum of ten items",
+}
+
+func (NetFlowConfig) SwaggerDoc() map[string]string {
+	return map_NetFlowConfig
+}
+
+var map_Network = map[string]string{
+	"": "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (Network) SwaggerDoc() map[string]string {
+	return map_Network
+}
+
+var map_NetworkList = map[string]string{
+	"": "NetworkList contains a list of Network configurations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (NetworkList) SwaggerDoc() map[string]string {
+	return map_NetworkList
+}
+
+var map_NetworkMigration = map[string]string{
+	"": "NetworkMigration represents the cluster network migration configuration.",
+	"mtu": "mtu contains the MTU migration configuration. Set this to allow changing the MTU values for the default network. If unset, the operation of changing the MTU for the default network will be rejected.",
+	"networkType": "networkType was previously used when changing the default network type. DEPRECATED: network type migration is no longer supported, and setting this to a non-empty value will result in the network operator rejecting the configuration.",
+	"features": "features was previously used to configure which network plugin features would be migrated in a network type migration. DEPRECATED: network type migration is no longer supported, and setting this to a non-empty value will result in the network operator rejecting the configuration.",
+	"mode": "mode indicates the mode of network type migration. DEPRECATED: network type migration is no longer supported, and setting this to a non-empty value will result in the network operator rejecting the configuration.",
+}
+
+func (NetworkMigration) SwaggerDoc() map[string]string {
+	return map_NetworkMigration
+}
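The migration docs above chain NetworkMigration, MTUMigration and MTUMigrationValues together. A minimal sketch of an MTU change, assuming the vendored operator/v1 field shapes (to/from as optional *uint32 pointers is an assumption); the concrete MTU values are illustrative only.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

// u32 returns a pointer to v; the to/from fields are optional pointers.
func u32(v uint32) *uint32 { return &v }

func main() {
	migration := operatorv1.NetworkMigration{
		MTU: &operatorv1.MTUMigration{
			// Lower the default network MTU from 1400 to 1200; per the
			// docs above, the target must stay below the uplink MTU by
			// the minimum appropriate offset.
			Network: &operatorv1.MTUMigrationValues{From: u32(1400), To: u32(1200)},
			// The machine uplink keeps a 1500 MTU, which already
			// accommodates the new default network MTU.
			Machine: &operatorv1.MTUMigrationValues{To: u32(1500)},
		},
	}
	fmt.Printf("%+v\n", migration)
}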
+var map_NetworkSpec = map[string]string{
+	"": "NetworkSpec is the top-level network configuration object.",
+	"clusterNetwork": "clusterNetwork is the IP address pool to use for pod IPs. Some network providers support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr.",
+	"serviceNetwork": "serviceNetwork is the IP address pool to use for Service IPs. Currently, all existing network providers only support a single value here, but this is an array to allow for growth.",
+	"defaultNetwork": "defaultNetwork is the \"default\" network that all pods will receive",
+	"additionalNetworks": "additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled.",
+	"disableMultiNetwork": "disableMultiNetwork defaults to 'false' and this setting enables the pod multi-networking capability. disableMultiNetwork when set to 'true' at cluster install time does not install the components, typically the Multus CNI and the network-attachment-definition CRD, that enable the pod multi-networking capability. Setting the parameter to 'true' might be useful when you need to install third-party CNI plugins, but these plugins are not supported by Red Hat. Changing the parameter value as a postinstallation cluster task has no effect.",
+	"useMultiNetworkPolicy": "useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy objects are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored.",
+	"deployKubeProxy": "deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when ovn-kubernetes is used and true otherwise.",
+	"disableNetworkDiagnostics": "disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled or not. If unset, this property defaults to 'false' and network diagnostics is enabled. Setting this to 'true' would reduce the additional load of the pods performing the checks.",
+	"kubeProxyConfig": "kubeProxyConfig lets us configure desired proxy configuration, if deployKubeProxy is true. If not specified, sensible defaults will be chosen by OpenShift directly.",
+	"exportNetworkFlows": "exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently only supported on the OVN-Kubernetes plugin. If unset, flows will not be exported to any collector.",
+	"migration": "migration enables and configures cluster network migration, for network changes that cannot be made instantly.",
+	"additionalRoutingCapabilities": "additionalRoutingCapabilities describes components and relevant configuration providing additional routing capabilities. When set, it enables such components and the usage of the routing capabilities they provide for the machine network. Upstream operators, like the MetalLB operator, that require these capabilities may rely on, or automatically set, this attribute. Network plugins may leverage advanced routing capabilities acquired through the enablement of these components but may require specific configuration on their side to do so; refer to their respective documentation and configuration options.",
+}
+
+func (NetworkSpec) SwaggerDoc() map[string]string {
+	return map_NetworkSpec
+}
+
+var map_NetworkStatus = map[string]string{
+	"": "NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object.",
+}
+
+func (NetworkStatus) SwaggerDoc() map[string]string {
+	return map_NetworkStatus
+}
+
+var map_OVNKubernetesConfig = map[string]string{
+	"": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project",
+	"mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400.",
+	"genevePort": "genevePort is the UDP port to be used by Geneve encapsulation. Default is 6081.",
+	"hybridOverlayConfig": "hybridOverlayConfig configures an additional overlay network for peers that are not using OVN.",
+	"ipsecConfig": "ipsecConfig enables and configures IPsec for pods on the pod network within the cluster.",
+	"policyAuditConfig": "policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used.",
+	"gatewayConfig": "gatewayConfig holds the configuration for node gateway options.",
+	"v4InternalSubnet": "v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is already being used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. Default is 100.64.0.0/16.",
+	"v6InternalSubnet": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is already being used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. Default is fd98::/64.",
+	"egressIPConfig": "egressIPConfig holds the configuration for EgressIP options.",
+	"ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default configuration is used. Check individual fields within ipv4 for details of default values.",
+	"ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default configuration is used. Check individual fields within ipv6 for details of default values.",
+	"routeAdvertisements": "routeAdvertisements determines if the functionality to advertise cluster network routes through a dynamic routing protocol, such as BGP, is enabled or not. This functionality is configured through the ovn-kubernetes RouteAdvertisements CRD. Requires the 'FRR' routing capability provider to be enabled as an additional routing capability. Allowed values are \"Enabled\", \"Disabled\" and omitted. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is \"Disabled\".",
+}
+
+func (OVNKubernetesConfig) SwaggerDoc() map[string]string {
+	return map_OVNKubernetesConfig
+}
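Combining several of the fields documented above, here is a rough sketch of a NetworkSpec that selects OVN-Kubernetes as the default network and routes pod egress through the host. It assumes the vendored github.com/openshift/api/operator/v1 types; the CIDRs are illustrative and the type value is the documented NetworkType string.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	spec := operatorv1.NetworkSpec{
		// clusterNetwork is the pod IP pool (the "cluster-cidr").
		ClusterNetwork: []operatorv1.ClusterNetworkEntry{
			{CIDR: "10.128.0.0/14", HostPrefix: 23},
		},
		ServiceNetwork: []string{"172.30.0.0/16"},
		DefaultNetwork: operatorv1.DefaultNetworkDefinition{
			Type: "OVNKubernetes", // any NetworkType except NetworkTypeRaw
			OVNKubernetesConfig: &operatorv1.OVNKubernetesConfig{
				GatewayConfig: &operatorv1.GatewayConfig{
					// Send pod egress via the ovn-k8s-mp0 management port
					// into the host first; per the docs above, this
					// disables hardware offload support.
					RoutingViaHost: true,
				},
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}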
+var map_OpenShiftSDNConfig = map[string]string{
+	"": "OpenShiftSDNConfig was used to configure the OpenShift SDN plugin. It is no longer used.",
+	"mode": "mode is one of \"Multitenant\", \"Subnet\", or \"NetworkPolicy\"",
+	"vxlanPort": "vxlanPort is the port to use for all vxlan packets. The default is 4789.",
+	"mtu": "mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink.",
+	"useExternalOpenvswitch": "useExternalOpenvswitch used to control whether the operator would deploy an OVS DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always run as a system service, and this flag is ignored.",
+	"enableUnidling": "enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled.",
+}
+
+func (OpenShiftSDNConfig) SwaggerDoc() map[string]string {
+	return map_OpenShiftSDNConfig
+}
+
+var map_PolicyAuditConfig = map[string]string{
+	"rateLimit": "rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset, the default of 20 msg/sec is used.",
+	"maxFileSize": "maxFileSize is the max size an ACL_audit log file is allowed to reach before rotation occurs. Units are in MB and the default is 50MB.",
+	"maxLogFiles": "maxLogFiles specifies the maximum number of ACL_audit log files that can be present.",
+	"destination": "destination is the location for policy log messages. Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/; additionally, syslog output may be configured as follows. Valid values are: - \"libc\" -> to use the libc syslog() function of the host node's journald process - \"udp:host:port\" -> for sending syslog over UDP - \"unix:file\" -> for using the UNIX domain socket directly - \"null\" -> to discard all messages logged to syslog. The default is \"null\"",
+	"syslogFacility": "syslogFacility is the RFC5424 facility for generated messages, e.g. \"kern\". Default is \"local0\".",
+}
+
+func (PolicyAuditConfig) SwaggerDoc() map[string]string {
+	return map_PolicyAuditConfig
+}
+
+var map_ProxyConfig = map[string]string{
+	"": "ProxyConfig defines the configuration knobs for kubeproxy. All of these are optional and have sensible defaults",
+	"iptablesSyncPeriod": "An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s",
+	"bindAddress": "The address to \"bind\" on. Defaults to 0.0.0.0",
+	"proxyArguments": "Any additional arguments to pass to the kubeproxy process",
+}
+
+func (ProxyConfig) SwaggerDoc() map[string]string {
+	return map_ProxyConfig
+}
+
+var map_SFlowConfig = map[string]string{
+	"collectors": "sFlowCollectors is a list of strings formatted as ip:port with a maximum of ten items",
+}
+
+func (SFlowConfig) SwaggerDoc() map[string]string {
+	return map_SFlowConfig
+}
+
+var map_SimpleMacvlanConfig = map[string]string{
+	"": "SimpleMacvlanConfig contains configurations for macvlan interface.",
+	"master": "master is the host interface to create the macvlan interface from. If not specified, it will be the default route interface",
+	"ipamConfig": "ipamConfig configures the IPAM module that will be used for IP Address Management (IPAM).",
+	"mode": "mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge",
+	"mtu": "mtu is the mtu to use for the macvlan interface. If unset, the host's kernel will select the value.",
+}
+
+func (SimpleMacvlanConfig) SwaggerDoc() map[string]string {
+	return map_SimpleMacvlanConfig
+}
+
+var map_StaticIPAMAddresses = map[string]string{
+	"": "StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses",
+	"address": "address is the IP address in CIDR format",
+	"gateway": "gateway is an IP inside the subnet to designate as the gateway",
+}
+
+func (StaticIPAMAddresses) SwaggerDoc() map[string]string {
+	return map_StaticIPAMAddresses
+}
+
+var map_StaticIPAMConfig = map[string]string{
+	"": "StaticIPAMConfig contains configurations for static IPAM (IP Address Management)",
+	"addresses": "addresses configures IP addresses for the interface",
+	"routes": "routes configures IP routes for the interface",
+	"dns": "dns configures DNS for the interface",
+}
+
+func (StaticIPAMConfig) SwaggerDoc() map[string]string {
+	return map_StaticIPAMConfig
+}
+
+var map_StaticIPAMDNS = map[string]string{
+	"": "StaticIPAMDNS provides DNS related information for static IPAM",
+	"nameservers": "nameservers lists the DNS servers to use for lookups",
+	"domain": "domain configures the domain name (the local domain) used for short hostname lookups",
+	"search": "search configures priority ordered search domains for short hostname lookups",
+}
+
+func (StaticIPAMDNS) SwaggerDoc() map[string]string {
+	return map_StaticIPAMDNS
+}
+
+var map_StaticIPAMRoutes = map[string]string{
+	"": "StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes",
+	"destination": "destination is the IP route destination",
+	"gateway": "gateway is the route's next-hop IP address. If unset, a default gateway is assumed (as determined by the CNI plugin).",
+}
+
+func (StaticIPAMRoutes) SwaggerDoc() map[string]string {
+	return map_StaticIPAMRoutes
+}
+
+var map_OLM = map[string]string{
+	"": "OLM provides information to configure an operator to manage the OLM controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec holds user settable values for configuration",
+	"status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (OLM) SwaggerDoc() map[string]string {
+	return map_OLM
+}
+
+var map_OLMList = map[string]string{
+	"": "OLMList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items": "items contains the items",
+}
+
+func (OLMList) SwaggerDoc() map[string]string {
+	return map_OLMList
+}
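Tying together the AdditionalNetworkDefinition, SimpleMacvlanConfig and StaticIPAM* types documented just above: a minimal sketch of a secondary macvlan network with one static address, a default route and DNS, assuming the vendored github.com/openshift/api/operator/v1 package. The enum strings ("SimpleMacvlan", "Static") follow the documented type names, and the network name, interface, addresses and domain are hypothetical.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	net := operatorv1.AdditionalNetworkDefinition{
		Type: "SimpleMacvlan", // documented as NetworkTypeSimpleMacvlan
		Name: "macvlan-net",   // hypothetical; must be unique
		SimpleMacvlanConfig: &operatorv1.SimpleMacvlanConfig{
			Master: "eth1", // hypothetical host interface
			// mode is omitted; the documented default is bridge.
			IPAMConfig: &operatorv1.IPAMConfig{
				Type: "Static", // documented as IPAMTypeStatic
				StaticIPAMConfig: &operatorv1.StaticIPAMConfig{
					Addresses: []operatorv1.StaticIPAMAddresses{
						{Address: "192.168.10.10/24", Gateway: "192.168.10.1"},
					},
					Routes: []operatorv1.StaticIPAMRoutes{
						{Destination: "0.0.0.0/0", Gateway: "192.168.10.1"},
					},
					DNS: &operatorv1.StaticIPAMDNS{
						Nameservers: []string{"192.168.10.1"},
						Domain:      "example.internal", // hypothetical
					},
				},
			},
		},
	}
	fmt.Printf("%+v\n", net)
}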
+var map_OpenShiftAPIServer = map[string]string{
+	"": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec is the specification of the desired behavior of the OpenShift API Server.",
+	"status": "status defines the observed status of the OpenShift API Server.",
+}
+
+func (OpenShiftAPIServer) SwaggerDoc() map[string]string {
+	return map_OpenShiftAPIServer
+}
+
+var map_OpenShiftAPIServerList = map[string]string{
+	"": "OpenShiftAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items": "items contains the items",
+}
+
+func (OpenShiftAPIServerList) SwaggerDoc() map[string]string {
+	return map_OpenShiftAPIServerList
+}
+
+var map_OpenShiftControllerManager = map[string]string{
+	"": "OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (OpenShiftControllerManager) SwaggerDoc() map[string]string {
+	return map_OpenShiftControllerManager
+}
+
+var map_OpenShiftControllerManagerList = map[string]string{
+	"": "OpenShiftControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items": "items contains the items",
+}
+
+func (OpenShiftControllerManagerList) SwaggerDoc() map[string]string {
+	return map_OpenShiftControllerManagerList
+}
+
+var map_KubeScheduler = map[string]string{
+	"": "KubeScheduler provides information to configure an operator to manage scheduler.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec is the specification of the desired behavior of the Kubernetes Scheduler",
+	"status": "status is the most recently observed status of the Kubernetes Scheduler",
+}
+
+func (KubeScheduler) SwaggerDoc() map[string]string {
+	return map_KubeScheduler
+}
+
+var map_KubeSchedulerList = map[string]string{
+	"": "KubeSchedulerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items": "items contains the items",
+}
+
+func (KubeSchedulerList) SwaggerDoc() map[string]string {
+	return map_KubeSchedulerList
+}
+
+var map_ServiceCA = map[string]string{
+	"": "ServiceCA provides information to configure an operator to manage the service cert controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec holds user settable values for configuration",
+	"status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (ServiceCA) SwaggerDoc() map[string]string {
+	return map_ServiceCA
+}
+
+var map_ServiceCAList = map[string]string{
+	"": "ServiceCAList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items": "items contains the items",
+}
+
+func (ServiceCAList) SwaggerDoc() map[string]string {
+	return map_ServiceCAList
+}
+
+var map_ServiceCatalogAPIServer = map[string]string{
+	"": "ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ServiceCatalogAPIServer) SwaggerDoc() map[string]string {
+	return map_ServiceCatalogAPIServer
+}
+
+var map_ServiceCatalogAPIServerList = map[string]string{
+	"": "ServiceCatalogAPIServerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items": "items contains the items",
+}
+
+func (ServiceCatalogAPIServerList) SwaggerDoc() map[string]string {
+	return map_ServiceCatalogAPIServerList
+}
+
+var map_ServiceCatalogControllerManager = map[string]string{
+	"": "ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ServiceCatalogControllerManager) SwaggerDoc() map[string]string {
+	return map_ServiceCatalogControllerManager
+}
+
+var map_ServiceCatalogControllerManagerList = map[string]string{
+	"": "ServiceCatalogControllerManagerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items": "items contains the items",
+}
+
+func (ServiceCatalogControllerManagerList) SwaggerDoc() map[string]string {
+	return map_ServiceCatalogControllerManagerList
+}
+
+var map_Storage = map[string]string{
+	"": "Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec holds user settable values for configuration",
+	"status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Storage) SwaggerDoc() map[string]string {
+	return map_Storage
+}
+
+var map_StorageList = map[string]string{
+	"": "StorageList contains a list of Storages.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (StorageList) SwaggerDoc() map[string]string {
+	return map_StorageList
+}
+
+var map_StorageSpec = map[string]string{
+	"": "StorageSpec is the specification of the desired behavior of the cluster storage operator.",
+	"vsphereStorageDriver": "vsphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. DEPRECATED: This field will be removed in a future release.",
+}
+
+func (StorageSpec) SwaggerDoc() map[string]string {
+	return map_StorageSpec
+}
+
+var map_StorageStatus = map[string]string{
+	"": "StorageStatus defines the observed status of the cluster storage operator.",
+}
+
+func (StorageStatus) SwaggerDoc() map[string]string {
+	return map_StorageStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/openshift/tests-extension/vendor/modules.txt b/openshift/tests-extension/vendor/modules.txt
index e0c2b4b63..afd6716ba 100644
--- a/openshift/tests-extension/vendor/modules.txt
+++ b/openshift/tests-extension/vendor/modules.txt
@@ -4,6 +4,9 @@ cel.dev/expr
 # github.com/antlr4-go/antlr/v4 v4.13.1
 ## explicit; go 1.22
 github.com/antlr4-go/antlr/v4
+# github.com/blang/semver/v4 v4.0.0
+## explicit; go 1.14
+github.com/blang/semver/v4
 # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 ## explicit
 github.com/davecgh/go-spew/spew
@@ -156,11 +159,16 @@ github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo
 github.com/openshift-eng/openshift-tests-extension/pkg/junit
 github.com/openshift-eng/openshift-tests-extension/pkg/util/sets
 github.com/openshift-eng/openshift-tests-extension/pkg/version
-# github.com/openshift/api v0.0.0-20250718204806-3333746edfbf
+# github.com/openshift/api v0.0.0-20250808142411-c974eeafe3f1
 ## explicit; go 1.24.0
+github.com/openshift/api/build/v1
 github.com/openshift/api/config/v1
 github.com/openshift/api/config/v1alpha1
 github.com/openshift/api/config/v1alpha2
+github.com/openshift/api/image/docker10
+github.com/openshift/api/image/dockerpre012
+github.com/openshift/api/image/v1
+github.com/openshift/api/operator/v1
 # github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee
 ## explicit; go 1.24.0
 github.com/openshift/client-go/config/applyconfigurations/config/v1
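Finally, the generated SwaggerDoc methods added by this diff (everything up to the AUTO-GENERATED FUNCTIONS END HERE marker) are plain value-receiver accessors over the map variables, so consumers such as swagger/OpenAPI tooling can look up field documentation by JSON field name. A minimal usage sketch against the vendored github.com/openshift/api/operator/v1 package, whose inclusion is what the modules.txt hunk above records:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// SwaggerDoc returns the field-name-to-description map generated
	// above; the "" key holds the description of the type itself.
	docs := (operatorv1.GatewayConfig{}).SwaggerDoc()
	for field, doc := range docs {
		fmt.Printf("%q: %s\n", field, doc)
	}
}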